diff --git a/.gitignore b/.gitignore index d8f8bca6ec5faa7f12c78c2dbeb9c7d11e903b97..f1e7651dabdb487f76efa9c992407bb077feac35 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ build/ log/ nohup.out .DS_Store +.idea diff --git a/MANIFEST.in b/MANIFEST.in index b0a4f6dc151b0e11d83655d3f7ef40c200a88ee8..97372da0035488913c83dfe6f2ddfb8fe0c906c3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,7 +1,8 @@ include LICENSE.txt include README.md include docs/en/whl_en.md -recursive-include deploy/python predict_cls.py preprocess.py postprocess.py det_preprocess.py +recursive-include deploy/python *.py +recursive-include deploy/configs *.yaml recursive-include deploy/utils get_image_list.py config.py logger.py predictor.py recursive-include ppcls/ *.py *.txt \ No newline at end of file diff --git a/README.md b/README.md index 44885f554afdc7e00188fae2987e7fbbb4278fcc..13c4f964bb9063f28d6e08dfb8c6b828a81d2536 120000 --- a/README.md +++ b/README.md @@ -1 +1 @@ -README_ch.md \ No newline at end of file +README_en.md \ No newline at end of file diff --git a/README_ch.md b/README_ch.md index 9219857fd950c4d5a4c96ae28ad80d7c5e060cb1..2ca73fdc5b2c1b1e504cf4ec8eef2d0dcb13deb4 100644 --- a/README_ch.md +++ b/README_ch.md @@ -4,100 +4,130 @@ ## 简介 -飞桨图像识别套件PaddleClas是飞桨为工业界和学术界所准备的一个图像识别任务的工具集,助力使用者训练出更好的视觉模型和应用落地。 +飞桨图像识别套件PaddleClas是飞桨为工业界和学术界所准备的一个图像识别和图像分类任务的工具集,助力使用者训练出更好的视觉模型和应用落地。 -**近期更新** -- 2022.4.21 新增 CVPR2022 oral论文 [MixFormer](https://arxiv.org/pdf/2204.02557.pdf) 相关[代码](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files)。 -- 2022.1.27 全面升级文档;新增[PaddleServing C++ pipeline部署方式](./deploy/paddleserving)和[18M图像识别安卓部署Demo](./deploy/lite_shitu)。 -- 2021.11.1 发布[PP-ShiTu技术报告](https://arxiv.org/pdf/2111.00775.pdf),新增饮料识别demo -- 2021.10.23 发布轻量级图像识别系统PP-ShiTu,CPU上0.2s即可完成在10w+库的图像识别。 -[点击这里](./docs/zh_CN/quick_start/quick_start_recognition.md)立即体验 -- 2021.09.17 发布PP-LCNet系列超轻量骨干网络模型, 在Intel CPU上,单张图像预测速度约5ms,ImageNet-1K数据集上Top1识别准确率达到80.82%,超越ResNet152的模型效果。PP-LCNet的介绍可以参考[论文](https://arxiv.org/pdf/2109.15099.pdf), 或者[PP-LCNet模型介绍](docs/zh_CN/models/PP-LCNet.md),相关指标和预训练权重可以从 [这里](docs/zh_CN/algorithm_introduction/ImageNet_models.md)下载。 -- [more](./docs/zh_CN/others/update_history.md) - -## 特性 - -- PP-ShiTu轻量图像识别系统:集成了目标检测、特征学习、图像检索等模块,广泛适用于各类图像识别任务。cpu上0.2s即可完成在10w+库的图像识别。 +
+ +

PULC实用图像分类模型效果展示

+
+  -- PP-LCNet轻量级CPU骨干网络:专门为CPU设备打造轻量级骨干网络,速度、精度均远超竞品。 -- 丰富的预训练模型库:提供了36个系列共175个ImageNet预训练模型,其中7个精选系列模型支持结构快速修改。 +
+ +

PP-ShiTu图像识别系统效果展示

+
-- 全面易用的特征学习组件:集成arcmargin, triplet loss等12度量学习方法,通过配置文件即可随意组合切换。 -- SSLD知识蒸馏:14个分类预训练模型,精度普遍提升3%以上;其中ResNet50_vd模型在ImageNet-1k数据集上的Top-1精度达到了84.0%, -Res2Net200_vd预训练模型Top-1精度高达85.1%。 +## 近期更新 +- 📢将于**6月15-6月17日晚20:30** 进行为期三天的课程直播,详细介绍超轻量图像分类方案,对各场景模型优化原理及使用方式进行拆解,之后还有产业案例全流程实操,对各类痛难点解决方案进行手把手教学,加上现场互动答疑,抓紧扫码上车吧!
- +
+- 🔥️ 2022.6.15 发布[PULC超轻量图像分类实用方案](docs/zh_CN/PULC/PULC_train.md),CPU推理3ms,精度比肩SwinTransformer,覆盖人、车、OCR场景九大常见任务。 + +- 2022.5.26 [飞桨产业实践范例直播课](http://aglc.cn/v-c4FAR),解读**超轻量重点区域人员出入管理方案**。 + +- 2022.5.23 新增[人员出入管理范例库](https://aistudio.baidu.com/aistudio/projectdetail/4094475),具体内容可以在 AI Studio 上体验。 + +- 2022.5.20 上线[PP-HGNet](./docs/zh_CN/models/PP-HGNet.md), [PP-LCNetv2](./docs/zh_CN/models/PP-LCNetV2.md)。 + +- 2022.4.21 新增 CVPR2022 oral论文 [MixFormer](https://arxiv.org/pdf/2204.02557.pdf) 相关[代码](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files)。 + +- [more](./docs/zh_CN/others/update_history.md) + +## 特性 + +PaddleClas发布了[PP-HGNet](docs/zh_CN/models/PP-HGNet.md)、[PP-LCNetv2](docs/zh_CN/models/PP-LCNetV2.md)、 [PP-LCNet](docs/zh_CN/models/PP-LCNet.md)和[SSLD半监督知识蒸馏方案](docs/zh_CN/advanced_tutorials/ssld.md)等算法, +并支持多种图像分类、识别相关算法,在此基础上打造[PULC超轻量图像分类方案](docs/zh_CN/PULC/PULC_quickstart.md)和[PP-ShiTu图像识别系统](./docs/zh_CN/quick_start/quick_start_recognition.md)。 +![](https://user-images.githubusercontent.com/19523330/173273046-239a42da-c88d-4c2c-94b1-2134557afa49.png) + ## 欢迎加入技术交流群 -* 您可以扫描下面的QQ/微信二维码(添加小助手微信并回复“C”),加入PaddleClas微信交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。 +* 您可以扫描下面的微信/QQ二维码(添加小助手微信并回复“C”),加入PaddleClas微信交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。
- +
## 快速体验 +PULC超轻量图像分类方案快速体验:[点击这里](docs/zh_CN/PULC/PULC_quickstart.md) + PP-ShiTu图像识别快速体验:[点击这里](./docs/zh_CN/quick_start/quick_start_recognition.md) ## 文档教程 -- 安装说明 - - [安装Paddle](./docs/zh_CN/installation/install_paddle.md) - - [安装PaddleClas](./docs/zh_CN/installation/install_paddleclas.md) -- 快速体验 - - [PP-ShiTu图像识别快速体验](./docs/zh_CN/quick_start/quick_start_recognition.md) - - 图像分类快速体验 - - [尝鲜版](./docs/zh_CN/quick_start/quick_start_classification_new_user.md) - - [进阶版](./docs/zh_CN/quick_start/quick_start_classification_professional.md) - - [多标签分类](./docs/zh_CN/quick_start/quick_start_multilabel_classification.md) +- [环境准备](docs/zh_CN/installation/install_paddleclas.md) +- [PULC超轻量图像分类实用方案](docs/zh_CN/PULC/PULC_train.md) + - [超轻量图像分类快速体验](docs/zh_CN/PULC/PULC_quickstart.md) + - [超轻量图像分类模型库](docs/zh_CN/PULC/PULC_model_list.md) + - [PULC有人/无人分类模型](docs/zh_CN/PULC/PULC_person_exists.md) + - [PULC人体属性识别模型](docs/zh_CN/PULC/PULC_person_attribute.md) + - [PULC佩戴安全帽分类模型](docs/zh_CN/PULC/PULC_safety_helmet.md) + - [PULC交通标志分类模型](docs/zh_CN/PULC/PULC_traffic_sign.md) + - [PULC车辆属性识别模型](docs/zh_CN/PULC/PULC_vehicle_attribute.md) + - [PULC有车/无车分类模型](docs/zh_CN/PULC/PULC_car_exists.md) + - [PULC含文字图像方向分类模型](docs/zh_CN/PULC/PULC_text_image_orientation.md) + - [PULC文本行方向分类模型](docs/zh_CN/PULC/PULC_textline_orientation.md) + - [PULC语种分类模型](docs/zh_CN/PULC/PULC_language_classification.md) + - [模型训练](docs/zh_CN/PULC/PULC_train.md) + - 推理部署 + - [基于python预测引擎推理](docs/zh_CN/inference_deployment/python_deploy.md#1) + - [基于C++预测引擎推理](docs/zh_CN/inference_deployment/cpp_deploy.md) + - [服务化部署](docs/zh_CN/inference_deployment/classification_serving_deploy.md) + - [端侧部署](docs/zh_CN/inference_deployment/paddle_lite_deploy.md) + - [Paddle2ONNX模型转化与预测](deploy/paddle2onnx/readme.md) + - [模型压缩](deploy/slim/README.md) - [PP-ShiTu图像识别系统介绍](#图像识别系统介绍) + - [图像识别快速体验](docs/zh_CN/quick_start/quick_start_recognition.md) + - 模块介绍 - [主体检测](./docs/zh_CN/image_recognition_pipeline/mainbody_detection.md) - - [特征提取](./docs/zh_CN/image_recognition_pipeline/feature_extraction.md) + - [特征提取模型](./docs/zh_CN/image_recognition_pipeline/feature_extraction.md) - [向量检索](./docs/zh_CN/image_recognition_pipeline/vector_search.md) -- [骨干网络和预训练模型库](./docs/zh_CN/algorithm_introduction/ImageNet_models.md) -- 数据准备 - - [图像分类数据集介绍](./docs/zh_CN/data_preparation/classification_dataset.md) - - [图像识别数据集介绍](./docs/zh_CN/data_preparation/recognition_dataset.md) -- 模型训练 - - [图像分类任务](./docs/zh_CN/models_training/classification.md) - - [图像识别任务](./docs/zh_CN/models_training/recognition.md) - - [训练参数调整策略](./docs/zh_CN/models_training/train_strategy.md) - - [配置文件说明](./docs/zh_CN/models_training/config_description.md) -- 模型预测部署 - - [模型导出](./docs/zh_CN/inference_deployment/export_model.md) - - Python/C++ 预测引擎 - - [基于Python预测引擎预测推理](./docs/zh_CN/inference_deployment/python_deploy.md) - - [基于C++分类预测引擎预测推理](./docs/zh_CN/inference_deployment/cpp_deploy.md)、[基于C++的PP-ShiTu预测引擎预测推理](deploy/cpp_shitu/readme.md) - - 服务化部署 - - [Paddle Serving服务化部署(推荐)](./docs/zh_CN/inference_deployment/paddle_serving_deploy.md) - - [Hub serving服务化部署](./docs/zh_CN/inference_deployment/paddle_hub_serving_deploy.md) - - [端侧部署](./deploy/lite/readme.md) - - [whl包预测](./docs/zh_CN/inference_deployment/whl_deploy.md) -- 算法介绍 - - [图像分类任务介绍](./docs/zh_CN/algorithm_introduction/image_classification.md) - - [度量学习介绍](./docs/zh_CN/algorithm_introduction/metric_learning.md) -- 高阶使用 - - [数据增广](./docs/zh_CN/advanced_tutorials/DataAugmentation.md) - - 
[模型量化](./docs/zh_CN/advanced_tutorials/model_prune_quantization.md) - - [知识蒸馏](./docs/zh_CN/advanced_tutorials/knowledge_distillation.md) - - [PaddleClas结构解析](./docs/zh_CN/advanced_tutorials/code_overview.md) - - [社区贡献指南](./docs/zh_CN/advanced_tutorials/how_to_contribute.md) + - [哈希编码](docs/zh_CN/image_recognition_pipeline/) + - [模型训练](docs/zh_CN/models_training/recognition.md) + - 推理部署 + - [基于python预测引擎推理](docs/zh_CN/inference_deployment/python_deploy.md#2) + - [基于C++预测引擎推理](deploy/cpp_shitu/readme.md) + - [服务化部署](docs/zh_CN/inference_deployment/recognition_serving_deploy.md) + - [端侧部署](deploy/lite_shitu/README.md) +- PP系列骨干网络模型 + - [PP-HGNet](docs/zh_CN/models/PP-HGNet.md) + - [PP-LCNetv2](docs/zh_CN/models/PP-LCNetV2.md) + - [PP-LCNet](docs/zh_CN/models/PP-LCNet.md) +- [SSLD半监督知识蒸馏方案](docs/zh_CN/advanced_tutorials/ssld.md) +- 前沿算法 + - [骨干网络和预训练模型库](docs/zh_CN/algorithm_introduction/ImageNet_models.md) + - [度量学习](docs/zh_CN/algorithm_introduction/metric_learning.md) + - [模型压缩](docs/zh_CN/algorithm_introduction/model_prune_quantization.md) + - [模型蒸馏](docs/zh_CN/algorithm_introduction/knowledge_distillation.md) + - [数据增强](docs/zh_CN/advanced_tutorials/DataAugmentation.md) +- [产业实用范例库](docs/zh_CN/samples) +- [30分钟快速体验图像分类](docs/zh_CN/quick_start/quick_start_classification_new_user.md) - FAQ - - [图像识别精选问题](docs/zh_CN/faq_series/faq_2021_s2.md) - - [图像分类精选问题](docs/zh_CN/faq_series/faq_selected_30.md) - - [图像分类FAQ第一季](docs/zh_CN/faq_series/faq_2020_s1.md) - - [图像分类FAQ第二季](docs/zh_CN/faq_series/faq_2021_s1.md) + - [图像识别精选问题](docs/zh_CN/faq_series/faq_2021_s2.md) + - [图像分类精选问题](docs/zh_CN/faq_series/faq_selected_30.md) + - [图像分类FAQ第一季](docs/zh_CN/faq_series/faq_2020_s1.md) + - [图像分类FAQ第二季](docs/zh_CN/faq_series/faq_2021_s1.md) +- [社区贡献指南](./docs/zh_CN/advanced_tutorials/how_to_contribute.md) - [许可证书](#许可证书) - [贡献代码](#贡献代码) + + +## PULC超轻量图像分类方案 +
+ +
+PULC融合了骨干网络、数据增广、蒸馏等多种前沿算法,可以自动训练得到轻量且高精度的图像分类模型。 +PaddleClas提供了覆盖人、车、OCR场景九大常见任务的分类模型,CPU推理3ms,精度比肩SwinTransformer。 + -## PP-ShiTu图像识别系统介绍 +## PP-ShiTu图像识别系统
@@ -105,6 +135,11 @@ PP-ShiTu图像识别快速体验:[点击这里](./docs/zh_CN/quick_start/quick PP-ShiTu是一个实用的轻量级通用图像识别系统,主要由主体检测、特征学习和向量检索三个模块组成。该系统从骨干网络选择和调整、损失函数的选择、数据增强、学习率变换策略、正则化参数选择、预训练模型使用以及模型裁剪量化8个方面,采用多种策略,对各个模块的模型进行优化,最终得到在CPU上仅0.2s即可完成10w+库的图像识别的系统。更多细节请参考[PP-ShiTu技术方案](https://arxiv.org/pdf/2111.00775.pdf)。 + +## PULC实用图像分类模型效果展示 +
+ +
## PP-ShiTu图像识别系统效果展示 diff --git a/README_en.md b/README_en.md index 9b0d7c85d76cf06eac8fb265abb85c3bb98a275f..4bf960e57f2e56972f889c4bcf6a6d715b903477 100644 --- a/README_en.md +++ b/README_en.md @@ -4,39 +4,41 @@ ## Introduction -PaddleClas is an image recognition toolset for industry and academia, helping users train better computer vision models and apply them in real scenarios. +PaddleClas is an image classification and image recognition toolset for industry and academia, helping users train better computer vision models and apply them in real scenarios. -**Recent updates** - -- 2022.4.21 Added the related [code](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files) of the CVPR2022 oral paper [MixFormer](https://arxiv.org/pdf/2204.02557.pdf). - -- 2021.09.17 Add PP-LCNet series model developed by PaddleClas, these models show strong competitiveness on Intel CPUs. -For the introduction of PP-LCNet, please refer to [paper](https://arxiv.org/pdf/2109.15099.pdf) or [PP-LCNet model introduction](docs/en/models/PP-LCNet_en.md). The metrics and pretrained model are available [here](docs/en/ImageNet_models_en.md). - -- 2021.06.29 Add Swin-transformer series model,Highest top1 acc on ImageNet1k dataset reaches 87.2%, training, evaluation and inference are all supported. Pretrained models can be downloaded [here](docs/en/models/models_intro_en.md). -- 2021.06.16 PaddleClas release/2.2. Add metric learning and vector search modules. Add product recognition, animation character recognition, vehicle recognition and logo recognition. Added 30 pretrained models of LeViT, Twins, TNT, DLA, HarDNet, and RedNet, and the accuracy is roughly the same as that of the paper. -- [more](./docs/en/update_history_en.md) +
+ -## Features +PULC demo images +
+  -- A practical image recognition system consist of detection, feature learning and retrieval modules, widely applicable to all types of image recognition tasks. -Four sample solutions are provided, including product recognition, vehicle recognition, logo recognition and animation character recognition. -- Rich library of pre-trained models: Provide a total of 164 ImageNet pre-trained models in 35 series, among which 6 selected series of models support fast structural modification. +
+ -- Comprehensive and easy-to-use feature learning components: 12 metric learning methods are integrated and can be combined and switched at will through configuration files. +PP-ShiTu demo images +
-- SSLD knowledge distillation: The 14 classification pre-training models generally improved their accuracy by more than 3%; among them, the ResNet50_vd model achieved a Top-1 accuracy of 84.0% on the Image-Net-1k dataset and the Res2Net200_vd pre-training model achieved a Top-1 accuracy of 85.1%. +**Recent updates** +- 2022.6.15 Release [**P**ractical **U**ltra **L**ight-weight image **C**lassification solutions](./docs/en/PULC/PULC_quickstart_en.md). PULC models inference within 3ms on CPU devices, with accuracy on par with SwinTransformer. We also release 9 practical classification models covering pedestrian, vehicle and OCR scenario. +- 2022.4.21 Added the related [code](https://github.com/PaddlePaddle/PaddleClas/pull/1820/files) of the CVPR2022 oral paper [MixFormer](https://arxiv.org/pdf/2204.02557.pdf). -- Data augmentation: Provide 8 data augmentation algorithms such as AutoAugment, Cutout, Cutmix, etc. with detailed introduction, code replication and evaluation of effectiveness in a unified experimental environment. +- 2021.09.17 Add PP-LCNet series model developed by PaddleClas, these models show strong competitiveness on Intel CPUs. +For the introduction of PP-LCNet, please refer to [paper](https://arxiv.org/pdf/2109.15099.pdf) or [PP-LCNet model introduction](docs/en/models/PP-LCNet_en.md). The metrics and pretrained model are available [here](docs/en/algorithm_introduction/ImageNet_models_en.md). +- 2021.06.29 Add [Swin-transformer](docs/en/models/SwinTransformer_en.md)) series model,Highest top1 acc on ImageNet1k dataset reaches 87.2%, training, evaluation and inference are all supported. Pretrained models can be downloaded [here](docs/en/algorithm_introduction/ImageNet_models_en.md#16). +- 2021.06.16 PaddleClas release/2.2. Add metric learning and vector search modules. Add product recognition, animation character recognition, vehicle recognition and logo recognition. Added 30 pretrained models of LeViT, Twins, TNT, DLA, HarDNet, and RedNet, and the accuracy is roughly the same as that of the paper. +- [more](./docs/en/others/update_history_en.md) +## Features +PaddleClas release PP-HGNet、PP-LCNetv2、 PP-LCNet and **S**imple **S**emi-supervised **L**abel **D**istillation algorithms, and support plenty of +image classification and image recognition algorithms. +Based on th algorithms above, PaddleClas release PP-ShiTu image recognition system and [**P**ractical **U**ltra **L**ight-weight image **C**lassification solutions](docs/en/PULC/PULC_quickstart_en.md). -
- -
+![](https://user-images.githubusercontent.com/19523330/173539361-68cf7ab1-7e3b-4e5e-b00f-1500719bd2a2.png) ## Welcome to Join the Technical Exchange Group @@ -48,41 +50,57 @@ Four sample solutions are provided, including product recognition, vehicle recog
## Quick Start -Quick experience of image recognition:[Link](./docs/en/tutorials/quick_start_recognition_en.md) +Quick experience of PP-ShiTu image recognition system:[Link](./docs/en/quick_start/quick_start_recognition_en.md) + +Quick experience of **P**ractical **U**ltra **L**ight-weight image **C**lassification models:[Link](docs/en/PULC/PULC_quickstart_en.md) ## Tutorials -- [Quick Installation](./docs/en/tutorials/install_en.md) -- [Quick Start of Recognition](./docs/en/tutorials/quick_start_recognition_en.md) +- [Install Paddle](./docs/en/installation/install_paddle_en.md) +- [Install PaddleClas Environment](./docs/en/installation/install_paddleclas_en.md) +- [Practical Ultra Light-weight image Classification solutions](./docs/en/PULC/PULC_train_en.md) + - [PULC Quick Start](docs/en/PULC/PULC_quickstart_en.md) + - [PULC Model Zoo](docs/en/PULC/PULC_model_list_en.md) + - [PULC Classification Model of Someone or Nobody](docs/en/PULC/PULC_person_exists_en.md) + - [PULC Recognition Model of Person Attribute](docs/en/PULC/PULC_person_attribute_en.md) + - [PULC Classification Model of Wearing or Unwearing Safety Helmet](docs/en/PULC/PULC_safety_helmet_en.md) + - [PULC Classification Model of Traffic Sign](docs/en/PULC/PULC_traffic_sign_en.md) + - [PULC Recognition Model of Vehicle Attribute](docs/en/PULC/PULC_vehicle_attribute_en.md) + - [PULC Classification Model of Containing or Uncontaining Car](docs/en/PULC/PULC_car_exists_en.md) + - [PULC Classification Model of Text Image Orientation](docs/en/PULC/PULC_text_image_orientation_en.md) + - [PULC Classification Model of Textline Orientation](docs/en/PULC/PULC_textline_orientation_en.md) + - [PULC Classification Model of Language](docs/en/PULC/PULC_language_classification_en.md) +- [Quick Start of Recognition](./docs/en/quick_start/quick_start_recognition_en.md) - [Introduction to Image Recognition Systems](#Introduction_to_Image_Recognition_Systems) -- [Demo images](#Demo_images) +- [Image Recognition Demo images](#Rec_Demo_images) +- [PULC demo images](#Clas_Demo_images) - Algorithms Introduction - - [Backbone Network and Pre-trained Model Library](./docs/en/ImageNet_models_en.md) - - [Mainbody Detection](./docs/en/application/mainbody_detection_en.md) - - [Image Classification](./docs/en/tutorials/image_classification_en.md) - - [Feature Learning](./docs/en/application/feature_learning_en.md) - - [Product Recognition](./docs/en/application/product_recognition_en.md) - - [Vehicle Recognition](./docs/en/application/vehicle_recognition_en.md) - - [Logo Recognition](./docs/en/application/logo_recognition_en.md) - - [Animation Character Recognition](./docs/en/application/cartoon_character_recognition_en.md) + - [Backbone Network and Pre-trained Model Library](./docs/en/algorithm_introduction/ImageNet_models_en.md) + - [Mainbody Detection](./docs/en/image_recognition_pipeline/mainbody_detection_en.md) + - [Feature Learning](./docs/en/image_recognition_pipeline/feature_extraction_en.md) - [Vector Search](./deploy/vector_search/README.md) -- Models Training/Evaluation - - [Image Classification](./docs/en/tutorials/getting_started_en.md) - - [Feature Learning](./docs/en/tutorials/getting_started_retrieval_en.md) - Inference Model Prediction - - [Python Inference](./docs/en/inference.md) + - [Python Inference](./docs/en/inference_deployment/python_deploy_en.md) - [C++ Classfication Inference](./deploy/cpp/readme_en.md), [C++ PP-ShiTu Inference](deploy/cpp_shitu/readme_en.md) - Model Deploy (only support classification for now, recognition coming 
soon) - [Hub Serving Deployment](./deploy/hubserving/readme_en.md) - [Mobile Deployment](./deploy/lite/readme_en.md) - - [Inference Using whl](./docs/en/whl_en.md) + - [Inference Using whl](./docs/en/inference_deployment/whl_deploy_en.md) - Advanced Tutorial - [Knowledge Distillation](./docs/en/advanced_tutorials/distillation/distillation_en.md) - - [Model Quantization](./docs/en/extension/paddle_quantization_en.md) - - [Data Augmentation](./docs/en/advanced_tutorials/image_augmentation/ImageAugment_en.md) + - [Model Quantization](./docs/en/algorithm_introduction/model_prune_quantization_en.md) + - [Data Augmentation](./docs/en/advanced_tutorials/DataAugmentation_en.md) - [License](#License) - [Contribution](#Contribution) + +## Introduction to Practical Ultra Light-weight image Classification solutions +
+ +
+PULC solutions consists of PP-LCNet light-weight backbone, SSLD pretrained models, Ensemble of Data Augmentation strategy and SKL-UGI knowledge distillation. +PULC models inference within 3ms on CPU devices, with accuracy comparable with SwinTransformer. We also release 9 practical models covering pedestrian, vehicle and OCR. + ## Introduction to Image Recognition Systems @@ -97,8 +115,14 @@ Image recognition can be divided into three steps: For a new unknown category, there is no need to retrain the model, just prepare images of new category, extract features and update retrieval database and the category can be recognised. - -## Demo images [more](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.2/docs/images/recognition/more_demo_images) + +## PULC demo images +
+ +
+ + +## Image Recognition Demo images [more](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.2/docs/images/recognition/more_demo_images) - Product recognition
diff --git a/deploy/configs/PULC/car_exists/inference_car_exists.yaml b/deploy/configs/PULC/car_exists/inference_car_exists.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6733069d99b5622c83321bc628f3d70274ce8d4 --- /dev/null +++ b/deploy/configs/PULC/car_exists/inference_car_exists.yaml @@ -0,0 +1,36 @@ +Global: + infer_imgs: "./images/PULC/car_exists/objects365_00001507.jpeg" + inference_model_dir: "./models/car_exists_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: False + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: ThreshOutput + ThreshOutput: + threshold: 0.5 + label_0: no_car + label_1: contains_car + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/PULC/language_classification/inference_language_classification.yaml b/deploy/configs/PULC/language_classification/inference_language_classification.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb9fb6b6631e774e7486bcdb31c25621e2b7d790 --- /dev/null +++ b/deploy/configs/PULC/language_classification/inference_language_classification.yaml @@ -0,0 +1,33 @@ +Global: + infer_imgs: "./images/PULC/language_classification/word_35404.png" + inference_model_dir: "./models/language_classification_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: False + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: Topk + Topk: + topk: 2 + class_id_map_file: "../ppcls/utils/PULC_label_list/language_classification_label_list.txt" + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/PULC/person_attribute/inference_person_attribute.yaml b/deploy/configs/PULC/person_attribute/inference_person_attribute.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5be2a3568291d0a31a7026974fc22ecf54a8f4c --- /dev/null +++ b/deploy/configs/PULC/person_attribute/inference_person_attribute.yaml @@ -0,0 +1,32 @@ +Global: + infer_imgs: "./images/PULC/person_attribute/090004.jpg" + inference_model_dir: "./models/person_attribute_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: True + cpu_num_threads: 10 + benchmark: False + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: PersonAttribute + PersonAttribute: + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold diff --git a/deploy/configs/PULC/person_exists/inference_person_exists.yaml b/deploy/configs/PULC/person_exists/inference_person_exists.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..3df94a80c7c75814e778e5320a31b20a8a7eb742 --- /dev/null +++ b/deploy/configs/PULC/person_exists/inference_person_exists.yaml @@ -0,0 +1,36 @@ +Global: + infer_imgs: "./images/PULC/person_exists/objects365_02035329.jpg" + inference_model_dir: "./models/person_exists_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: False + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: ThreshOutput + ThreshOutput: + threshold: 0.5 + label_0: nobody + label_1: someone + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/PULC/safety_helmet/inference_safety_helmet.yaml b/deploy/configs/PULC/safety_helmet/inference_safety_helmet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..66a4cebb359a9b1f03a205ee6a031ca6464cffa8 --- /dev/null +++ b/deploy/configs/PULC/safety_helmet/inference_safety_helmet.yaml @@ -0,0 +1,36 @@ +Global: + infer_imgs: "./images/PULC/safety_helmet/safety_helmet_test_1.png" + inference_model_dir: "./models/safety_helmet_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: False + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: ThreshOutput + ThreshOutput: + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/PULC/text_image_orientation/inference_text_image_orientation.yaml b/deploy/configs/PULC/text_image_orientation/inference_text_image_orientation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c6c3969ffa627288fe58fab28b3fe1cbffe9dd03 --- /dev/null +++ b/deploy/configs/PULC/text_image_orientation/inference_text_image_orientation.yaml @@ -0,0 +1,35 @@ +Global: + infer_imgs: "./images/PULC/text_image_orientation/img_rot0_demo.jpg" + inference_model_dir: "./models/text_image_orientation_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: False + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: Topk + Topk: + topk: 2 + class_id_map_file: "../ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt" + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/PULC/textline_orientation/inference_textline_orientation.yaml b/deploy/configs/PULC/textline_orientation/inference_textline_orientation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..108b3dd53a95c06345bdd7ccd34b2e5252d2df19 --- /dev/null +++ 
b/deploy/configs/PULC/textline_orientation/inference_textline_orientation.yaml @@ -0,0 +1,33 @@ +Global: + infer_imgs: "./images/PULC/textline_orientation/textline_orientation_test_0_0.png" + inference_model_dir: "./models/textline_orientation_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: True + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: Topk + Topk: + topk: 1 + class_id_map_file: "../ppcls/utils/PULC_label_list/textline_orientation_label_list.txt" + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/PULC/traffic_sign/inference_traffic_sign.yaml b/deploy/configs/PULC/traffic_sign/inference_traffic_sign.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53699718b4fdd38da86eaee4cccc584dcc87d2b7 --- /dev/null +++ b/deploy/configs/PULC/traffic_sign/inference_traffic_sign.yaml @@ -0,0 +1,35 @@ +Global: + infer_imgs: "./images/PULC/traffic_sign/99603_17806.jpg" + inference_model_dir: "./models/traffic_sign_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: True + cpu_num_threads: 10 + benchmark: False + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: Topk + Topk: + topk: 5 + class_id_map_file: "../ppcls/utils/PULC_label_list/traffic_sign_label_list.txt" + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml b/deploy/configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml new file mode 100644 index 0000000000000000000000000000000000000000..14ae348d09faca113d5863fbb57f066675b3f447 --- /dev/null +++ b/deploy/configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml @@ -0,0 +1,32 @@ +Global: + infer_imgs: "./images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg" + inference_model_dir: "./models/vehicle_attribute_infer" + batch_size: 1 + use_gpu: True + enable_mkldnn: True + cpu_num_threads: 10 + benchmark: False + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: VehicleAttribute + VehicleAttribute: + color_threshold: 0.5 + type_threshold: 0.5 + diff --git a/deploy/configs/inference_attr.yaml b/deploy/configs/inference_attr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88f73db5419414812450b768ac783982386f0a78 --- /dev/null +++ b/deploy/configs/inference_attr.yaml @@ -0,0 +1,33 @@ +Global: + infer_imgs: "./images/Pedestrain_Attr.jpg" + inference_model_dir: "../inference/" + batch_size: 1 + use_gpu: True + enable_mkldnn: False + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + 
- ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: PersonAttribute + PersonAttribute: + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + diff --git a/deploy/configs/inference_cartoon.yaml b/deploy/configs/inference_cartoon.yaml index 7d93d98cc0696d8e1508e02db2cc864d6f917d19..e79da55090130223466fd6b6a078b9909d6e26f2 100644 --- a/deploy/configs/inference_cartoon.yaml +++ b/deploy/configs/inference_cartoon.yaml @@ -8,7 +8,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 5 - labe_list: + label_list: - foreground use_gpu: True diff --git a/deploy/configs/inference_cls.yaml b/deploy/configs/inference_cls.yaml index fc0f0fe67aa628e504bb6fcb743f29fd020548cc..d9181278cc617822f98e4966abf0d12ceca498a4 100644 --- a/deploy/configs/inference_cls.yaml +++ b/deploy/configs/inference_cls.yaml @@ -1,5 +1,5 @@ Global: - infer_imgs: "./images/ILSVRC2012_val_00000010.jpeg" + infer_imgs: "./images/ImageNet/ILSVRC2012_val_00000010.jpeg" inference_model_dir: "./models" batch_size: 1 use_gpu: True @@ -32,4 +32,4 @@ PostProcess: topk: 5 class_id_map_file: "../ppcls/utils/imagenet1k_label_list.txt" SavePreLabel: - save_dir: ./pre_label/ \ No newline at end of file + save_dir: ./pre_label/ diff --git a/deploy/configs/inference_cls_based_action.yaml b/deploy/configs/inference_cls_based_action.yaml new file mode 100644 index 0000000000000000000000000000000000000000..005301c2ab395277020ef34db644cb1ffc26c2c3 --- /dev/null +++ b/deploy/configs/inference_cls_based_action.yaml @@ -0,0 +1,33 @@ +Global: + infer_imgs: "./images/ImageNet/ILSVRC2012_val_00000010.jpeg" + inference_model_dir: "./models/PPHGNet_tiny_calling_halfbody/" + batch_size: 1 + use_gpu: True + enable_mkldnn: True + cpu_num_threads: 10 + enable_benchmark: True + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + +PreProcess: + transform_ops: + - ResizeImage: + resize_short: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + channel_num: 3 + - ToCHWImage: + +PostProcess: + main_indicator: Topk + Topk: + topk: 2 + class_id_map_file: "../dataset/data/phone_label_list.txt" + SavePreLabel: + save_dir: ./pre_label/ diff --git a/deploy/configs/inference_cls_ch4.yaml b/deploy/configs/inference_cls_ch4.yaml index 9b740ed8293c3d66a325682cafc42e2b1415df4d..85f9acb29a88772da63abe302354f5e17a9c3e59 100644 --- a/deploy/configs/inference_cls_ch4.yaml +++ b/deploy/configs/inference_cls_ch4.yaml @@ -1,5 +1,5 @@ Global: - infer_imgs: "./images/ILSVRC2012_val_00000010.jpeg" + infer_imgs: "./images/ImageNet/ILSVRC2012_val_00000010.jpeg" inference_model_dir: "./models" batch_size: 1 use_gpu: True @@ -32,4 +32,4 @@ PostProcess: topk: 5 class_id_map_file: "../ppcls/utils/imagenet1k_label_list.txt" SavePreLabel: - save_dir: ./pre_label/ \ No newline at end of file + save_dir: ./pre_label/ diff --git a/deploy/configs/inference_det.yaml b/deploy/configs/inference_det.yaml index c809a0257bc7c5b774f20fb3edb50a08e7d67bbb..dab7908ef7f59bfed077d9189811aedb650b0e92 100644 --- a/deploy/configs/inference_det.yaml +++ b/deploy/configs/inference_det.yaml @@ -5,7 +5,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 1 - labe_list: + label_list: - foreground # inference engine config diff --git 
a/deploy/configs/inference_drink.yaml b/deploy/configs/inference_drink.yaml index d044965f446634dcc151fd496a9d7b403b869d68..1c3e2c29aa8ddd5db46bbc8660c9f45942696a9c 100644 --- a/deploy/configs/inference_drink.yaml +++ b/deploy/configs/inference_drink.yaml @@ -8,7 +8,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 5 - labe_list: + label_list: - foreground use_gpu: True diff --git a/deploy/configs/inference_general.yaml b/deploy/configs/inference_general.yaml index 6b397b5047b427d02014060380112b096e0b2da2..8fb8ae3a56697b882be00da554f33750ead42f70 100644 --- a/deploy/configs/inference_general.yaml +++ b/deploy/configs/inference_general.yaml @@ -8,7 +8,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 5 - labe_list: + label_list: - foreground use_gpu: True diff --git a/deploy/configs/inference_general_binary.yaml b/deploy/configs/inference_general_binary.yaml index d76dae8f8f7c70f27996f6b20fd623bdc00bc441..72ec31fc438d1f884bada59507a90d172ab4a416 100644 --- a/deploy/configs/inference_general_binary.yaml +++ b/deploy/configs/inference_general_binary.yaml @@ -8,7 +8,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 5 - labe_list: + label_list: - foreground use_gpu: True diff --git a/deploy/configs/inference_logo.yaml b/deploy/configs/inference_logo.yaml index f78ca25a042b3224a973d81f7b0242ace7c25430..2b8228eab772f8b1488275163518a6e059a49c53 100644 --- a/deploy/configs/inference_logo.yaml +++ b/deploy/configs/inference_logo.yaml @@ -8,7 +8,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 5 - labe_list: + label_list: - foreground use_gpu: True diff --git a/deploy/configs/inference_product.yaml b/deploy/configs/inference_product.yaml index e7b494c383aa5f42b4515446805b1357ba43107c..78ba32068cb696e897c39d516e66b323bd12ad61 100644 --- a/deploy/configs/inference_product.yaml +++ b/deploy/configs/inference_product.yaml @@ -8,7 +8,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 5 - labe_list: + label_list: - foreground # inference engine config diff --git a/deploy/configs/inference_vehicle.yaml b/deploy/configs/inference_vehicle.yaml index d99f42ad684150f1efeaf65f031ee1ea707fee37..e289e9f523b061dd26b8d687e594499dd7cdec37 100644 --- a/deploy/configs/inference_vehicle.yaml +++ b/deploy/configs/inference_vehicle.yaml @@ -8,7 +8,7 @@ Global: image_shape: [3, 640, 640] threshold: 0.2 max_det_results: 5 - labe_list: + label_list: - foreground use_gpu: True diff --git a/deploy/cpp_shitu/include/object_detector.h b/deploy/cpp_shitu/include/object_detector.h index 5bfc56253b1845a50f3b6b093db314e97505cfef..6855a0dcc84c2711283fe8d23ba1d2afe376fb0e 100644 --- a/deploy/cpp_shitu/include/object_detector.h +++ b/deploy/cpp_shitu/include/object_detector.h @@ -33,106 +33,106 @@ using namespace paddle_infer; namespace Detection { // Object Detection Result - struct ObjectResult { - // Rectangle coordinates of detected object: left, right, top, down - std::vector rect; - // Class id of detected object - int class_id; - // Confidence of detected object - float confidence; - }; +struct ObjectResult { + // Rectangle coordinates of detected object: left, right, top, down + std::vector rect; + // Class id of detected object + int class_id; + // Confidence of detected object + float confidence; +}; // Generate visualization colormap for each class - std::vector GenerateColorMap(int num_class); +std::vector GenerateColorMap(int num_class); // Visualiztion Detection Result - cv::Mat VisualizeResult(const cv::Mat &img, 
- const std::vector &results, - const std::vector &lables, - const std::vector &colormap, const bool is_rbox); - - class ObjectDetector { - public: - explicit ObjectDetector(const YAML::Node &config_file) { - this->use_gpu_ = config_file["Global"]["use_gpu"].as(); - if (config_file["Global"]["gpu_id"].IsDefined()) - this->gpu_id_ = config_file["Global"]["gpu_id"].as(); - this->gpu_mem_ = config_file["Global"]["gpu_mem"].as(); - this->cpu_math_library_num_threads_ = - config_file["Global"]["cpu_num_threads"].as(); - this->use_mkldnn_ = config_file["Global"]["enable_mkldnn"].as(); - this->use_tensorrt_ = config_file["Global"]["use_tensorrt"].as(); - this->use_fp16_ = config_file["Global"]["use_fp16"].as(); - this->model_dir_ = - config_file["Global"]["det_inference_model_dir"].as(); - this->threshold_ = config_file["Global"]["threshold"].as(); - this->max_det_results_ = config_file["Global"]["max_det_results"].as(); - this->image_shape_ = - config_file["Global"]["image_shape"].as < std::vector < int >> (); - this->label_list_ = - config_file["Global"]["labe_list"].as < std::vector < std::string >> (); - this->ir_optim_ = config_file["Global"]["ir_optim"].as(); - this->batch_size_ = config_file["Global"]["batch_size"].as(); - - preprocessor_.Init(config_file["DetPreProcess"]["transform_ops"]); - LoadModel(model_dir_, batch_size_, run_mode); - } - - // Load Paddle inference model - void LoadModel(const std::string &model_dir, const int batch_size = 1, - const std::string &run_mode = "fluid"); - - // Run predictor - void Predict(const std::vector imgs, const int warmup = 0, - const int repeats = 1, - std::vector *result = nullptr, - std::vector *bbox_num = nullptr, - std::vector *times = nullptr); - - const std::vector &GetLabelList() const { - return this->label_list_; - } - - const float &GetThreshold() const { return this->threshold_; } - - private: - bool use_gpu_ = true; - int gpu_id_ = 0; - int gpu_mem_ = 800; - int cpu_math_library_num_threads_ = 6; - std::string run_mode = "fluid"; - bool use_mkldnn_ = false; - bool use_tensorrt_ = false; - bool batch_size_ = 1; - bool use_fp16_ = false; - std::string model_dir_; - float threshold_ = 0.5; - float max_det_results_ = 5; - std::vector image_shape_ = {3, 640, 640}; - std::vector label_list_; - bool ir_optim_ = true; - bool det_permute_ = true; - bool det_postprocess_ = true; - int min_subgraph_size_ = 30; - bool use_dynamic_shape_ = false; - int trt_min_shape_ = 1; - int trt_max_shape_ = 1280; - int trt_opt_shape_ = 640; - bool trt_calib_mode_ = false; - - // Preprocess image and copy data to input buffer - void Preprocess(const cv::Mat &image_mat); - - // Postprocess result - void Postprocess(const std::vector mats, - std::vector *result, std::vector bbox_num, - bool is_rbox); - - std::shared_ptr predictor_; - Preprocessor preprocessor_; - ImageBlob inputs_; - std::vector output_data_; - std::vector out_bbox_num_data_; - }; +cv::Mat VisualizeResult(const cv::Mat &img, + const std::vector &results, + const std::vector &lables, + const std::vector &colormap, const bool is_rbox); + +class ObjectDetector { +public: + explicit ObjectDetector(const YAML::Node &config_file) { + this->use_gpu_ = config_file["Global"]["use_gpu"].as(); + if (config_file["Global"]["gpu_id"].IsDefined()) + this->gpu_id_ = config_file["Global"]["gpu_id"].as(); + this->gpu_mem_ = config_file["Global"]["gpu_mem"].as(); + this->cpu_math_library_num_threads_ = + config_file["Global"]["cpu_num_threads"].as(); + this->use_mkldnn_ = 
config_file["Global"]["enable_mkldnn"].as(); + this->use_tensorrt_ = config_file["Global"]["use_tensorrt"].as(); + this->use_fp16_ = config_file["Global"]["use_fp16"].as(); + this->model_dir_ = + config_file["Global"]["det_inference_model_dir"].as(); + this->threshold_ = config_file["Global"]["threshold"].as(); + this->max_det_results_ = config_file["Global"]["max_det_results"].as(); + this->image_shape_ = + config_file["Global"]["image_shape"].as>(); + this->label_list_ = + config_file["Global"]["label_list"].as>(); + this->ir_optim_ = config_file["Global"]["ir_optim"].as(); + this->batch_size_ = config_file["Global"]["batch_size"].as(); + + preprocessor_.Init(config_file["DetPreProcess"]["transform_ops"]); + LoadModel(model_dir_, batch_size_, run_mode); + } + + // Load Paddle inference model + void LoadModel(const std::string &model_dir, const int batch_size = 1, + const std::string &run_mode = "fluid"); + + // Run predictor + void Predict(const std::vector imgs, const int warmup = 0, + const int repeats = 1, + std::vector *result = nullptr, + std::vector *bbox_num = nullptr, + std::vector *times = nullptr); + + const std::vector &GetLabelList() const { + return this->label_list_; + } + + const float &GetThreshold() const { return this->threshold_; } + +private: + bool use_gpu_ = true; + int gpu_id_ = 0; + int gpu_mem_ = 800; + int cpu_math_library_num_threads_ = 6; + std::string run_mode = "fluid"; + bool use_mkldnn_ = false; + bool use_tensorrt_ = false; + bool batch_size_ = 1; + bool use_fp16_ = false; + std::string model_dir_; + float threshold_ = 0.5; + float max_det_results_ = 5; + std::vector image_shape_ = {3, 640, 640}; + std::vector label_list_; + bool ir_optim_ = true; + bool det_permute_ = true; + bool det_postprocess_ = true; + int min_subgraph_size_ = 30; + bool use_dynamic_shape_ = false; + int trt_min_shape_ = 1; + int trt_max_shape_ = 1280; + int trt_opt_shape_ = 640; + bool trt_calib_mode_ = false; + + // Preprocess image and copy data to input buffer + void Preprocess(const cv::Mat &image_mat); + + // Postprocess result + void Postprocess(const std::vector mats, + std::vector *result, std::vector bbox_num, + bool is_rbox); + + std::shared_ptr predictor_; + Preprocessor preprocessor_; + ImageBlob inputs_; + std::vector output_data_; + std::vector out_bbox_num_data_; +}; } // namespace Detection diff --git a/deploy/hubserving/readme.md b/deploy/hubserving/readme.md index 6b2b2dd4dd703965f52fa7d16cd6be41672186a9..8506c9e4144b4792a06ff36de6c0f6d4698b40cf 100644 --- a/deploy/hubserving/readme.md +++ b/deploy/hubserving/readme.md @@ -1,83 +1,117 @@ -[English](readme_en.md) | 简体中文 +简体中文 | [English](readme_en.md) -# 基于PaddleHub Serving的服务部署 +# 基于 PaddleHub Serving 的服务部署 -hubserving服务部署配置服务包`clas`下包含3个必选文件,目录如下: -``` -hubserving/clas/ - └─ __init__.py 空文件,必选 - └─ config.json 配置文件,可选,使用配置启动服务时作为参数传入 - └─ module.py 主模块,必选,包含服务的完整逻辑 - └─ params.py 参数文件,必选,包含模型路径、前后处理参数等参数 +PaddleClas 支持通过 PaddleHub 快速进行服务化部署。目前支持图像分类的部署,图像识别的部署敬请期待。 + + +## 目录 +- [1. 简介](#1-简介) +- [2. 准备环境](#2-准备环境) +- [3. 下载推理模型](#3-下载推理模型) +- [4. 安装服务模块](#4-安装服务模块) +- [5. 启动服务](#5-启动服务) + - [5.1 命令行启动](#51-命令行启动) + - [5.2 配置文件启动](#52-配置文件启动) +- [6. 发送预测请求](#6-发送预测请求) +- [7. 自定义修改服务模块](#7-自定义修改服务模块) + + + +## 1. 简介 + +hubserving 服务部署配置服务包 `clas` 下包含 3 个必选文件,目录如下: + +```shell +deploy/hubserving/clas/ +├── __init__.py # 空文件,必选 +├── config.json # 配置文件,可选,使用配置启动服务时作为参数传入 +├── module.py # 主模块,必选,包含服务的完整逻辑 +└── params.py # 参数文件,必选,包含模型路径、前后处理参数等参数 ``` -## 快速启动服务 -### 1. 准备环境 + + +## 2. 
准备环境 ```shell -# 安装paddlehub,请安装2.0版本 -pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple +# 安装 paddlehub,建议安装 2.1.0 版本 +python3.7 -m pip install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple ``` -### 2. 下载推理模型 + + +## 3. 下载推理模型 + 安装服务模块前,需要准备推理模型并放到正确路径,默认模型路径为: -``` -分类推理模型结构文件:PaddleClas/inference/inference.pdmodel -分类推理模型权重文件:PaddleClas/inference/inference.pdiparams -``` + +* 分类推理模型结构文件:`PaddleClas/inference/inference.pdmodel` +* 分类推理模型权重文件:`PaddleClas/inference/inference.pdiparams` **注意**: -* 模型文件路径可在`PaddleClas/deploy/hubserving/clas/params.py`中查看和修改: +* 模型文件路径可在 `PaddleClas/deploy/hubserving/clas/params.py` 中查看和修改: + ```python "inference_model_dir": "../inference/" ``` - 需要注意,模型文件(包括.pdmodel与.pdiparams)名称必须为`inference`。 -* 我们也提供了大量基于ImageNet-1k数据集的预训练模型,模型列表及下载地址详见[模型库概览](../../docs/zh_CN/models/models_intro.md),也可以使用自己训练转换好的模型。 +* 模型文件(包括 `.pdmodel` 与 `.pdiparams`)的名称必须为 `inference`。 +* 我们提供了大量基于 ImageNet-1k 数据集的预训练模型,模型列表及下载地址详见[模型库概览](../../docs/zh_CN/algorithm_introduction/ImageNet_models.md),也可以使用自己训练转换好的模型。 -### 3. 安装服务模块 -针对Linux环境和Windows环境,安装命令如下。 -* 在Linux环境下,安装示例如下: -```shell -cd PaddleClas/deploy -# 安装服务模块: -hub install hubserving/clas/ -``` + +## 4. 安装服务模块 + +* 在 Linux 环境下,安装示例如下: + ```shell + cd PaddleClas/deploy + # 安装服务模块: + hub install hubserving/clas/ + ``` + +* 在 Windows 环境下(文件夹的分隔符为`\`),安装示例如下: + + ```shell + cd PaddleClas\deploy + # 安装服务模块: + hub install hubserving\clas\ + ``` + -* 在Windows环境下(文件夹的分隔符为`\`),安装示例如下: + +## 5. 启动服务 + + + +### 5.1 命令行启动 + +该方式仅支持使用 CPU 预测。启动命令: ```shell -cd PaddleClas\deploy -# 安装服务模块: -hub install hubserving\clas\ +hub serving start \ +--modules clas_system +--port 8866 ``` +这样就完成了一个服务化 API 的部署,使用默认端口号 8866。 -### 4. 启动服务 -#### 方式1. 命令行命令启动(仅支持CPU) -**启动命令:** -```shell -$ hub serving start --modules Module1==Version1 \ - --port XXXX \ - --use_multiprocess \ - --workers \ -``` +**参数说明**: +| 参数 | 用途 | +| ------------------ | ----------------------------------------------------------------------------------------------------------------------------- | +| --modules/-m | [**必选**] PaddleHub Serving 预安装模型,以多个 Module==Version 键值对的形式列出
*`当不指定 Version 时,默认选择最新版本`* | +| --port/-p | [**可选**] 服务端口,默认为 8866 | +| --use_multiprocess | [**可选**] 是否启用并发方式,默认为单进程方式,推荐多核 CPU 机器使用此方式
*`Windows 操作系统只支持单进程方式`* | +| --workers | [**可选**] 在并发方式下指定的并发任务数,默认为 `2*cpu_count-1`,其中 `cpu_count` 为 CPU 核数 | +更多部署细节详见 [PaddleHub Serving模型一键服务部署](https://paddlehub.readthedocs.io/zh_CN/release-v2.1/tutorial/serving.html) -**参数:** -|参数|用途| -|-|-| -|--modules/-m| [**必选**] PaddleHub Serving预安装模型,以多个Module==Version键值对的形式列出
*`当不指定Version时,默认选择最新版本`*| -|--port/-p| [**可选**] 服务端口,默认为8866| -|--use_multiprocess| [**可选**] 是否启用并发方式,默认为单进程方式,推荐多核CPU机器使用此方式
*`Windows操作系统只支持单进程方式`*| -|--workers| [**可选**] 在并发方式下指定的并发任务数,默认为`2*cpu_count-1`,其中`cpu_count`为CPU核数| + +### 5.2 配置文件启动 -如按默认参数启动服务: ```hub serving start -m clas_system``` +该方式仅支持使用 CPU 或 GPU 预测。启动命令: -这样就完成了一个服务化API的部署,使用默认端口号8866。 +```shell +hub serving start -c config.json +``` -#### 方式2. 配置文件启动(支持CPU、GPU) -**启动命令:** -```hub serving start -c config.json``` +其中,`config.json` 格式如下: -其中,`config.json`格式如下: ```json { "modules_info": { @@ -97,92 +131,109 @@ $ hub serving start --modules Module1==Version1 \ } ``` -- `init_args`中的可配参数与`module.py`中的`_initialize`函数接口一致。其中, - - 当`use_gpu`为`true`时,表示使用GPU启动服务。 - - 当`enable_mkldnn`为`true`时,表示使用MKL-DNN加速。 -- `predict_args`中的可配参数与`module.py`中的`predict`函数接口一致。 +**参数说明**: +* `init_args` 中的可配参数与 `module.py` 中的 `_initialize` 函数接口一致。其中, + - 当 `use_gpu` 为 `true` 时,表示使用 GPU 启动服务。 + - 当 `enable_mkldnn` 为 `true` 时,表示使用 MKL-DNN 加速。 +* `predict_args` 中的可配参数与 `module.py` 中的 `predict` 函数接口一致。 -**注意:** -- 使用配置文件启动服务时,其他参数会被忽略。 -- 如果使用GPU预测(即,`use_gpu`置为`true`),则需要在启动服务之前,设置CUDA_VISIBLE_DEVICES环境变量,如:```export CUDA_VISIBLE_DEVICES=0```,否则不用设置。 -- **`use_gpu`不可与`use_multiprocess`同时为`true`**。 -- **`use_gpu`与`enable_mkldnn`同时为`true`时,将忽略`enable_mkldnn`,而使用GPU**。 +**注意**: +* 使用配置文件启动服务时,将使用配置文件中的参数设置,其他命令行参数将被忽略; +* 如果使用 GPU 预测(即,`use_gpu` 置为 `true`),则需要在启动服务之前,设置 `CUDA_VISIBLE_DEVICES` 环境变量来指定所使用的 GPU 卡号,如:`export CUDA_VISIBLE_DEVICES=0`; +* **`use_gpu` 不可与 `use_multiprocess` 同时为 `true`**; +* **`use_gpu` 与 `enable_mkldnn` 同时为 `true` 时,将忽略 `enable_mkldnn`,而使用 GPU**。 + +如使用 GPU 3 号卡启动服务: -如,使用GPU 3号卡启动串联服务: ```shell cd PaddleClas/deploy export CUDA_VISIBLE_DEVICES=3 hub serving start -c hubserving/clas/config.json -``` +``` -## 发送预测请求 -配置好服务端,可使用以下命令发送预测请求,获取预测结果: + +## 6. 发送预测请求 + +配置好服务端后,可使用以下命令发送预测请求,获取预测结果: ```shell cd PaddleClas/deploy -python hubserving/test_hubserving.py server_url image_path -``` - -需要给脚本传递2个必须参数: -- **server_url**:服务地址,格式为 -`http://[ip_address]:[port]/predict/[module_name]` -- **image_path**:测试图像路径,可以是单张图片路径,也可以是图像集合目录路径。 -- **batch_size**:[**可选**] 以`batch_size`大小为单位进行预测,默认为`1`。 -- **resize_short**:[**可选**] 预处理时,按短边调整大小,默认为`256`。 -- **crop_size**:[**可选**] 预处理时,居中裁剪的大小,默认为`224`。 -- **normalize**:[**可选**] 预处理时,是否进行`normalize`,默认为`True`。 -- **to_chw**:[**可选**] 预处理时,是否调整为`CHW`顺序,默认为`True`。 +python3.7 hubserving/test_hubserving.py \ +--server_url http://127.0.0.1:8866/predict/clas_system \ +--image_file ./hubserving/ILSVRC2012_val_00006666.JPEG \ +--batch_size 8 +``` +**预测输出** +```log +The result(s): class_ids: [57, 67, 68, 58, 65], label_names: ['garter snake, grass snake', 'diamondback, diamondback rattlesnake, Crotalus adamanteus', 'sidewinder, horned rattlesnake, Crotalus cerastes', 'water snake', 'sea snake'], scores: [0.21915, 0.15631, 0.14794, 0.13177, 0.12285] +The average time of prediction cost: 2.970 s/image +The average time cost: 3.014 s/image +The average top-1 score: 0.110 +``` -**注意**:如果使用`Transformer`系列模型,如`DeiT_***_384`, `ViT_***_384`等,请注意模型的输入数据尺寸,需要指定`--resize_short=384 --crop_size=384`。 +**脚本参数说明**: +* **server_url**:服务地址,格式为`http://[ip_address]:[port]/predict/[module_name]`。 +* **image_path**:测试图像路径,可以是单张图片路径,也可以是图像集合目录路径。 +* **batch_size**:[**可选**] 以 `batch_size` 大小为单位进行预测,默认为 `1`。 +* **resize_short**:[**可选**] 预处理时,按短边调整大小,默认为 `256`。 +* **crop_size**:[**可选**] 预处理时,居中裁剪的大小,默认为 `224`。 +* **normalize**:[**可选**] 预处理时,是否进行 `normalize`,默认为 `True`。 +* **to_chw**:[**可选**] 预处理时,是否调整为 `CHW` 顺序,默认为 `True`。 +**注意**:如果使用 `Transformer` 系列模型,如 `DeiT_***_384`, `ViT_***_384` 等,请注意模型的输入数据尺寸,需要指定`--resize_short=384 --crop_size=384`。 -访问示例: 
+**返回结果格式说明**: +返回结果为列表(list),包含 top-k 个分类结果,以及对应的得分,还有此图片预测耗时,具体如下: ```shell -python hubserving/test_hubserving.py --server_url http://127.0.0.1:8866/predict/clas_system --image_file ./hubserving/ILSVRC2012_val_00006666.JPEG --batch_size 8 -``` - -### 返回结果格式说明 -返回结果为列表(list),包含top-k个分类结果,以及对应的得分,还有此图片预测耗时,具体如下: -``` list: 返回结果 -└─ list: 第一张图片结果 - └─ list: 前k个分类结果,依score递减排序 - └─ list: 前k个分类结果对应的score,依score递减排序 - └─ float: 该图分类耗时,单位秒 +└──list: 第一张图片结果 + ├── list: 前 k 个分类结果,依 score 递减排序 + ├── list: 前 k 个分类结果对应的 score,依 score 递减排序 + └── float: 该图分类耗时,单位秒 ``` -**说明:** 如果需要增加、删除、修改返回字段,可对相应模块进行修改,完整流程参考下一节自定义修改服务模块。 -## 自定义修改服务模块 -如果需要修改服务逻辑,你一般需要操作以下步骤: -- 1、 停止服务 -```hub serving stop --port/-p XXXX``` + +## 7. 自定义修改服务模块 -- 2、 到相应的`module.py`和`params.py`等文件中根据实际需求修改代码。`module.py`修改后需要重新安装(`hub install hubserving/clas/`)并部署。在进行部署前,可通过`python hubserving/clas/module.py`测试已安装服务模块。 +如果需要修改服务逻辑,需要进行以下操作: -- 3、 卸载旧服务包 -```hub uninstall clas_system``` +1. 停止服务 + ```shell + hub serving stop --port/-p XXXX + ``` -- 4、 安装修改后的新服务包 -```hub install hubserving/clas/``` +2. 到相应的 `module.py` 和 `params.py` 等文件中根据实际需求修改代码。`module.py` 修改后需要重新安装(`hub install hubserving/clas/`)并部署。在进行部署前,可先通过 `python3.7 hubserving/clas/module.py` 命令来快速测试准备部署的代码。 -- 5、重新启动服务 -```hub serving start -m clas_system``` +3. 卸载旧服务包 + ```shell + hub uninstall clas_system + ``` + +4. 安装修改后的新服务包 + ```shell + hub install hubserving/clas/ + ``` + +5. 重新启动服务 + ```shell + hub serving start -m clas_system + ``` **注意**: -常用参数可在[params.py](./clas/params.py)中修改: +常用参数可在 `PaddleClas/deploy/hubserving/clas/params.py` 中修改: * 更换模型,需要修改模型文件路径参数: ```python "inference_model_dir": ``` - * 更改后处理时返回的`top-k`结果数量: + * 更改后处理时返回的 `top-k` 结果数量: ```python 'topk': ``` - * 更改后处理时的lable与class id对应映射文件: + * 更改后处理时的 lable 与 class id 对应映射文件: ```python 'class_id_map_file': ``` -为了避免不必要的延时以及能够以batch_size进行预测,数据预处理逻辑(包括resize、crop等操作)在客户端完成,因此需要在[test_hubserving.py](./test_hubserving.py#L35-L52)中修改。 +为了避免不必要的延时以及能够以 batch_size 进行预测,数据预处理逻辑(包括 `resize`、`crop` 等操作)均在客户端完成,因此需要在 [PaddleClas/deploy/hubserving/test_hubserving.py#L41-L47](./test_hubserving.py#L41-L47) 以及 [PaddleClas/deploy/hubserving/test_hubserving.py#L51-L76](./test_hubserving.py#L51-L76) 中修改数据预处理逻辑相关代码。 diff --git a/deploy/hubserving/readme_en.md b/deploy/hubserving/readme_en.md index bb0ddbd2c3a994b164d8781767b8de38d484b420..6dce5cc52cc32ef41b8f18d5eb772cc44a1661ad 100644 --- a/deploy/hubserving/readme_en.md +++ b/deploy/hubserving/readme_en.md @@ -1,83 +1,116 @@ English | [简体中文](readme.md) -# Service deployment based on PaddleHub Serving +# Service deployment based on PaddleHub Serving + +PaddleClas supports rapid service deployment through PaddleHub. Currently, the deployment of image classification is supported. Please look forward to the deployment of image recognition. + +## Catalogue +- [1 Introduction](#1-introduction) +- [2. Prepare the environment](#2-prepare-the-environment) +- [3. Download the inference model](#3-download-the-inference-model) +- [4. Install the service module](#4-install-the-service-module) +- [5. Start service](#5-start-service) + - [5.1 Start with command line parameters](#51-start-with-command-line-parameters) + - [5.2 Start with configuration file](#52-start-with-configuration-file) +- [6. Send prediction requests](#6-send-prediction-requests) +- [7. 
User defined service module modification](#7-user-defined-service-module-modification) -HubServing service pack contains 3 files, the directory is as follows: -``` -hubserving/clas/ - └─ __init__.py Empty file, required - └─ config.json Configuration file, optional, passed in as a parameter when using configuration to start the service - └─ module.py Main module file, required, contains the complete logic of the service - └─ params.py Parameter file, required, including parameters such as model path, pre- and post-processing parameters -``` -## Quick start service -### 1. Prepare the environment + +## 1 Introduction + +The hubserving service deployment configuration service package `clas` contains 3 required files, the directories are as follows: + ```shell -# Install version 2.0 of PaddleHub -pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple +deploy/hubserving/clas/ +├── __init__.py # Empty file, required +├── config.json # Configuration file, optional, passed in as a parameter when starting the service with configuration +├── module.py # The main module, required, contains the complete logic of the service +└── params.py # Parameter file, required, including model path, pre- and post-processing parameters and other parameters ``` -### 2. Download inference model -Before installing the service module, you need to prepare the inference model and put it in the correct path. The default model path is: -``` -Model structure file: PaddleClas/inference/inference.pdmodel -Model parameters file: PaddleClas/inference/inference.pdiparams + +## 2. Prepare the environment +```shell +# Install paddlehub, version 2.1.0 is recommended +python3.7 -m pip install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple ``` -* The model file path can be viewed and modified in `PaddleClas/deploy/hubserving/clas/params.py`. - It should be noted that the prefix of model structure file and model parameters file must be `inference`. + +## 3. Download the inference model -* More models provided by PaddleClas can be obtained from the [model library](../../docs/en/models/models_intro_en.md). You can also use models trained by yourself. +Before installing the service module, you need to prepare the inference model and put it in the correct path. The default model path is: -### 3. Install Service Module +* Classification inference model structure file: `PaddleClas/inference/inference.pdmodel` +* Classification inference model weight file: `PaddleClas/inference/inference.pdiparams` -* On Linux platform, the examples are as follows. -```shell -cd PaddleClas/deploy -hub install hubserving/clas/ -``` +**Notice**: +* Model file paths can be viewed and modified in `PaddleClas/deploy/hubserving/clas/params.py`: + + ```python + "inference_model_dir": "../inference/" + ``` +* Model files (including `.pdmodel` and `.pdiparams`) must be named `inference`. +* We provide a large number of pre-trained models based on the ImageNet-1k dataset. For the model list and download address, see [Model Library Overview](../../docs/en/algorithm_introduction/ImageNet_models_en.md), or you can use your own trained and converted models. + + + +## 4. 
Install the service module + +* In the Linux environment, the installation example is as follows: + ```shell + cd PaddleClas/deploy + # Install the service module: + hub install hubserving/clas/ + ``` + +* In the Windows environment (the folder separator is `\`), the installation example is as follows: + + ```shell + cd PaddleClas\deploy + # Install the service module: + hub install hubserving\clas\ + ``` + + + +## 5. Start service + + + +### 5.1 Start with command line parameters + +This method only supports prediction using CPU. Start command: -* On Windows platform, the examples are as follows. ```shell -cd PaddleClas\deploy -hub install hubserving\clas\ +hub serving start \ +--modules clas_system +--port 8866 ``` +This completes the deployment of a serviced API, using the default port number 8866. -### 4. Start service -#### Way 1. Start with command line parameters (CPU only) +**Parameter Description**: +| parameters | uses | +| ------------------ | ------------------- | +| --modules/-m | [**required**] PaddleHub Serving pre-installed model, listed in the form of multiple Module==Version key-value pairs
*`When no Version is specified, the latest is selected by default version`* | +| --port/-p | [**OPTIONAL**] Service port, default is 8866 | +| --use_multiprocess | [**Optional**] Whether to enable the concurrent mode, the default is single-process mode, it is recommended to use this mode for multi-core CPU machines
*`Windows operating system only supports single-process mode`* | +| --workers | [**Optional**] The number of concurrent tasks specified in concurrent mode, the default is `2*cpu_count-1`, where `cpu_count` is the number of CPU cores | +For more deployment details, see [PaddleHub Serving Model One-Click Service Deployment](https://paddlehub.readthedocs.io/zh_CN/release-v2.1/tutorial/serving.html) -**start command:** -```shell -$ hub serving start --modules Module1==Version1 \ - --port XXXX \ - --use_multiprocess \ - --workers \ -``` -**parameters:** - -|parameters|usage| -|-|-| -|--modules/-m|PaddleHub Serving pre-installed model, listed in the form of multiple Module==Version key-value pairs
*`When Version is not specified, the latest version is selected by default`*| -|--port/-p|Service port, default is 8866| -|--use_multiprocess|Enable concurrent mode, the default is single-process mode, this mode is recommended for multi-core CPU machines
*`Windows operating system only supports single-process mode`*| -|--workers|The number of concurrent tasks specified in concurrent mode, the default is `2*cpu_count-1`, where `cpu_count` is the number of CPU cores| - -For example, start the 2-stage series service: -```shell -hub serving start -m clas_system -``` + +### 5.2 Start with configuration file -This completes the deployment of a service API, using the default port number 8866. +This method only supports prediction using CPU or GPU. Start command: -#### Way 2. Start with configuration file(CPU、GPU) -**start command:** ```shell -hub serving start --config/-c config.json -``` -Wherein, the format of `config.json` is as follows: +hub serving start -c config.json +``` + +Among them, the format of `config.json` is as follows: + ```json { "modules_info": { @@ -96,104 +129,110 @@ Wherein, the format of `config.json` is as follows: "workers": 2 } ``` -- The configurable parameters in `init_args` are consistent with the `_initialize` function interface in `module.py`. Among them, - - when `use_gpu` is `true`, it means that the GPU is used to start the service. - - when `enable_mkldnn` is `true`, it means that use MKL-DNN to accelerate. -- The configurable parameters in `predict_args` are consistent with the `predict` function interface in `module.py`. - -**Note:** -- When using the configuration file to start the service, other parameters will be ignored. -- If you use GPU prediction (that is, `use_gpu` is set to `true`), you need to set the environment variable CUDA_VISIBLE_DEVICES before starting the service, such as: ```export CUDA_VISIBLE_DEVICES=0```, otherwise you do not need to set it. -- **`use_gpu` and `use_multiprocess` cannot be `true` at the same time.** -- **When both `use_gpu` and `enable_mkldnn` are set to `true` at the same time, GPU is used to run and `enable_mkldnn` will be ignored.** - -For example, use GPU card No. 3 to start the 2-stage series service: + +**Parameter Description**: +* The configurable parameters in `init_args` are consistent with the `_initialize` function interface in `module.py`. in, + - When `use_gpu` is `true`, it means to use GPU to start the service. + - When `enable_mkldnn` is `true`, it means to use MKL-DNN acceleration. +* The configurable parameters in `predict_args` are consistent with the `predict` function interface in `module.py`. + +**Notice**: +* When using the configuration file to start the service, the parameter settings in the configuration file will be used, and other command line parameters will be ignored; +* If you use GPU prediction (ie, `use_gpu` is set to `true`), you need to set the `CUDA_VISIBLE_DEVICES` environment variable to specify the GPU card number used before starting the service, such as: `export CUDA_VISIBLE_DEVICES=0`; +* **`use_gpu` cannot be `true`** at the same time as `use_multiprocess`; +* ** When both `use_gpu` and `enable_mkldnn` are `true`, `enable_mkldnn` will be ignored and GPU** will be used. + +If you use GPU No. 
3 card to start the service: + ```shell cd PaddleClas/deploy export CUDA_VISIBLE_DEVICES=3 hub serving start -c hubserving/clas/config.json -``` - -## Send prediction requests -After the service starts, you can use the following command to send a prediction request to obtain the prediction result: -```shell -cd PaddleClas/deploy -python hubserving/test_hubserving.py server_url image_path ``` -Two required parameters need to be passed to the script: -- **server_url**: service address,format of which is -`http://[ip_address]:[port]/predict/[module_name]` -- **image_path**: Test image path, can be a single image path or an image directory path -- **batch_size**: [**Optional**] batch_size. Default by `1`. -- **resize_short**: [**Optional**] In preprocessing, resize according to short size. Default by `256`。 -- **crop_size**: [**Optional**] In preprocessing, centor crop size. Default by `224`。 -- **normalize**: [**Optional**] In preprocessing, whether to do `normalize`. Default by `True`。 -- **to_chw**: [**Optional**] In preprocessing, whether to transpose to `CHW`. Default by `True`。 + +## 6. Send prediction requests -**Notice**: -If you want to use `Transformer series models`, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input size of model, and need to set `--resize_short=384`, `--crop_size=384`. +After configuring the server, you can use the following command to send a prediction request to get the prediction result: -**Eg.** ```shell -python hubserving/test_hubserving.py --server_url http://127.0.0.1:8866/predict/clas_system --image_file ./hubserving/ILSVRC2012_val_00006666.JPEG --batch_size 8 -``` - -### Returned result format -The returned result is a list, including the `top_k`'s classification results, corresponding scores and the time cost of prediction, details as follows. - -``` -list: The returned results -└─ list: The result of first picture - └─ list: The top-k classification results, sorted in descending order of score - └─ list: The scores corresponding to the top-k classification results, sorted in descending order of score - └─ float: The time cost of predicting the picture, unit second +cd PaddleClas/deploy +python3.7 hubserving/test_hubserving.py \ +--server_url http://127.0.0.1:8866/predict/clas_system \ +--image_file ./hubserving/ILSVRC2012_val_00006666.JPEG \ +--batch_size 8 +``` +**Predicted output** +```log +The result(s): class_ids: [57, 67, 68, 58, 65], label_names: ['garter snake, grass snake', 'diamondback, diamondback rattlesnake, Crotalus adamanteus', 'sidewinder, horned rattlesnake, Crotalus cerastes' , 'water snake', 'sea snake'], scores: [0.21915, 0.15631, 0.14794, 0.13177, 0.12285] +The average time of prediction cost: 2.970 s/image +The average time cost: 3.014 s/image +The average top-1 score: 0.110 +``` + +**Script parameter description**: +* **server_url**: Service address, the format is `http://[ip_address]:[port]/predict/[module_name]`. +* **image_path**: The test image path, which can be a single image path or an image collection directory path. +* **batch_size**: [**OPTIONAL**] Make predictions in `batch_size` size, default is `1`. +* **resize_short**: [**optional**] When preprocessing, resize by short edge, default is `256`. +* **crop_size**: [**Optional**] The size of the center crop during preprocessing, the default is `224`. +* **normalize**: [**Optional**] Whether to perform `normalize` during preprocessing, the default is `True`. 
+* **to_chw**: [**Optional**] Whether to adjust to `CHW` order when preprocessing, the default is `True`. + +**Note**: If you use `Transformer` series models, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input data size of the model, you need to specify `--resize_short=384 -- crop_size=384`. + +**Return result format description**: +The returned result is a list (list), including the top-k classification results, the corresponding scores, and the time-consuming prediction of this image, as follows: +```shell +list: return result +└──list: first image result + ├── list: the top k classification results, sorted in descending order of score + ├── list: the scores corresponding to the first k classification results, sorted in descending order of score + └── float: The image classification time, in seconds ``` -**Note:** If you need to add, delete or modify the returned fields, you can modify the corresponding module. For the details, refer to the user-defined modification service module in the next section. -## User defined service module modification -If you need to modify the service logic, the following steps are generally required: -1. Stop service -```shell -hub serving stop --port/-p XXXX -``` + +## 7. User defined service module modification -2. Modify the code in the corresponding files, like `module.py` and `params.py`, according to the actual needs. You need re-install(hub install hubserving/clas/) and re-deploy after modifing `module.py`. -After modifying and installing and before deploying, you can use `python hubserving/clas/module.py` to test the installed service module. +If you need to modify the service logic, you need to do the following: -For example, if you need to replace the model used by the deployed service, you need to modify model path parameters `cfg.model_file` and `cfg.params_file` in `params.py`. Of course, other related parameters may need to be modified at the same time. Please modify and debug according to the actual situation. - -3. Uninstall old service module -```shell -hub uninstall clas_system -``` +1. Stop the service + ```shell + hub serving stop --port/-p XXXX + ``` -4. Install modified service module -```shell -hub install hubserving/clas/ -``` +2. Go to the corresponding `module.py` and `params.py` and other files to modify the code according to actual needs. `module.py` needs to be reinstalled after modification (`hub install hubserving/clas/`) and deployed. Before deploying, you can use the `python3.7 hubserving/clas/module.py` command to quickly test the code ready for deployment. -5. Restart service -```shell -hub serving start -m clas_system -``` +3. Uninstall the old service pack + ```shell + hub uninstall clas_system + ``` -**Note**: +4. Install the new modified service pack + ```shell + hub install hubserving/clas/ + ``` -Common parameters can be modified in params.py: -* Directory of model files(include model structure file and model parameters file): - ```python - "inference_model_dir": - ``` -* The number of Top-k results returned during post-processing: - ```python - 'topk': - ``` -* Mapping file corresponding to label and class ID during post-processing: - ```python - 'class_id_map_file': - ``` +5. Restart the service + ```shell + hub serving start -m clas_system + ``` -In order to avoid unnecessary delay and be able to predict in batch, the preprocessing (include resize, crop and other) is completed in the client, so modify [test_hubserving.py](./test_hubserving.py#L35-L52) if necessary. 
+**Notice**: +Common parameters can be modified in `PaddleClas/deploy/hubserving/clas/params.py`: + * To replace the model, you need to modify the model file path parameters: + ```python + "inference_model_dir": + ``` + * Change the number of `top-k` results returned when postprocessing: + ```python + 'topk': + ``` + * The mapping file corresponding to the lable and class id when changing the post-processing: + ```python + 'class_id_map_file': + ``` + +In order to avoid unnecessary delay and be able to predict with batch_size, data preprocessing logic (including `resize`, `crop` and other operations) is completed on the client side, so it needs to modify data preprocessing logic related code in [PaddleClas/deploy/hubserving/test_hubserving.py# L41-L47](./test_hubserving.py#L41-L47) and [PaddleClas/deploy/hubserving/test_hubserving.py#L51-L76](./test_hubserving.py#L51-L76). diff --git a/deploy/images/ILSVRC2012_val_00000010.jpeg b/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg similarity index 100% rename from deploy/images/ILSVRC2012_val_00000010.jpeg rename to deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg diff --git a/deploy/images/ILSVRC2012_val_00010010.jpeg b/deploy/images/ImageNet/ILSVRC2012_val_00010010.jpeg similarity index 100% rename from deploy/images/ILSVRC2012_val_00010010.jpeg rename to deploy/images/ImageNet/ILSVRC2012_val_00010010.jpeg diff --git a/deploy/images/ILSVRC2012_val_00020010.jpeg b/deploy/images/ImageNet/ILSVRC2012_val_00020010.jpeg similarity index 100% rename from deploy/images/ILSVRC2012_val_00020010.jpeg rename to deploy/images/ImageNet/ILSVRC2012_val_00020010.jpeg diff --git a/deploy/images/ILSVRC2012_val_00030010.jpeg b/deploy/images/ImageNet/ILSVRC2012_val_00030010.jpeg similarity index 100% rename from deploy/images/ILSVRC2012_val_00030010.jpeg rename to deploy/images/ImageNet/ILSVRC2012_val_00030010.jpeg diff --git a/deploy/images/PULC/car_exists/objects365_00001507.jpeg b/deploy/images/PULC/car_exists/objects365_00001507.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..9959954b6b8bf27589e1d2081f86c6078d16e2c1 Binary files /dev/null and b/deploy/images/PULC/car_exists/objects365_00001507.jpeg differ diff --git a/deploy/images/PULC/car_exists/objects365_00001521.jpeg b/deploy/images/PULC/car_exists/objects365_00001521.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..ea65b3108ec0476ce952b3221c31ac54fcef161d Binary files /dev/null and b/deploy/images/PULC/car_exists/objects365_00001521.jpeg differ diff --git a/deploy/images/PULC/language_classification/word_17.png b/deploy/images/PULC/language_classification/word_17.png new file mode 100644 index 0000000000000000000000000000000000000000..c0cd74632460e01676fbc5a43b220c0a7f7d0474 Binary files /dev/null and b/deploy/images/PULC/language_classification/word_17.png differ diff --git a/deploy/images/PULC/language_classification/word_20.png b/deploy/images/PULC/language_classification/word_20.png new file mode 100644 index 0000000000000000000000000000000000000000..f9149670e8a2aa086c91451442f63a727661fd7d Binary files /dev/null and b/deploy/images/PULC/language_classification/word_20.png differ diff --git a/deploy/images/PULC/language_classification/word_35404.png b/deploy/images/PULC/language_classification/word_35404.png new file mode 100644 index 0000000000000000000000000000000000000000..9e1789ab47aefecac8eaf1121decfc6a8cfb1e8b Binary files /dev/null and b/deploy/images/PULC/language_classification/word_35404.png differ diff --git 
a/deploy/images/PULC/person_attribute/090004.jpg b/deploy/images/PULC/person_attribute/090004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..140694eeec3d2925303e8c0d544ef5979cd78219 Binary files /dev/null and b/deploy/images/PULC/person_attribute/090004.jpg differ diff --git a/deploy/images/PULC/person_attribute/090007.jpg b/deploy/images/PULC/person_attribute/090007.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fea2e7c9e0047a8b59606877ad41fe24bf2e24c Binary files /dev/null and b/deploy/images/PULC/person_attribute/090007.jpg differ diff --git a/deploy/images/PULC/person_exists/objects365_01780782.jpg b/deploy/images/PULC/person_exists/objects365_01780782.jpg new file mode 100755 index 0000000000000000000000000000000000000000..a0dd0df59ae5a6386a04a8e0cf9cdbc529139c16 Binary files /dev/null and b/deploy/images/PULC/person_exists/objects365_01780782.jpg differ diff --git a/deploy/images/PULC/person_exists/objects365_02035329.jpg b/deploy/images/PULC/person_exists/objects365_02035329.jpg new file mode 100755 index 0000000000000000000000000000000000000000..16d7f2d08cd87bda1b67d21655f00f94a0c6e4e4 Binary files /dev/null and b/deploy/images/PULC/person_exists/objects365_02035329.jpg differ diff --git a/deploy/images/PULC/safety_helmet/safety_helmet_test_1.png b/deploy/images/PULC/safety_helmet/safety_helmet_test_1.png new file mode 100644 index 0000000000000000000000000000000000000000..c28f54f77d54df6e68e471538846b01db4387e08 Binary files /dev/null and b/deploy/images/PULC/safety_helmet/safety_helmet_test_1.png differ diff --git a/deploy/images/PULC/safety_helmet/safety_helmet_test_2.png b/deploy/images/PULC/safety_helmet/safety_helmet_test_2.png new file mode 100644 index 0000000000000000000000000000000000000000..8e784af808afb58d67fdb3e277dfeebd134ee846 Binary files /dev/null and b/deploy/images/PULC/safety_helmet/safety_helmet_test_2.png differ diff --git a/deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg b/deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg new file mode 100644 index 0000000000000000000000000000000000000000..412d41956ba48c8e3243bdeff746d389be7e762b Binary files /dev/null and b/deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg differ diff --git a/deploy/images/PULC/text_image_orientation/img_rot180_demo.jpg b/deploy/images/PULC/text_image_orientation/img_rot180_demo.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4725b96698e2ac222ae9d4830d8f29a33322443 Binary files /dev/null and b/deploy/images/PULC/text_image_orientation/img_rot180_demo.jpg differ diff --git a/deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png b/deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png new file mode 100644 index 0000000000000000000000000000000000000000..4b8d24d29ff0f8b4befff6bf943d506c36061d4d Binary files /dev/null and b/deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png differ diff --git a/deploy/images/PULC/textline_orientation/textline_orientation_test_0_1.png b/deploy/images/PULC/textline_orientation/textline_orientation_test_0_1.png new file mode 100644 index 0000000000000000000000000000000000000000..42ad5234973679e65be6054f90c1cc7c0f989bd2 Binary files /dev/null and b/deploy/images/PULC/textline_orientation/textline_orientation_test_0_1.png differ diff --git a/deploy/images/PULC/textline_orientation/textline_orientation_test_1_0.png b/deploy/images/PULC/textline_orientation/textline_orientation_test_1_0.png new file 
mode 100644 index 0000000000000000000000000000000000000000..ac2447842dd0fac260c0d3c6e0d156dda9890923 Binary files /dev/null and b/deploy/images/PULC/textline_orientation/textline_orientation_test_1_0.png differ diff --git a/deploy/images/PULC/textline_orientation/textline_orientation_test_1_1.png b/deploy/images/PULC/textline_orientation/textline_orientation_test_1_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7d5b75f7e5bbeabded56eba1b4b566c4ca019590 Binary files /dev/null and b/deploy/images/PULC/textline_orientation/textline_orientation_test_1_1.png differ diff --git a/deploy/images/PULC/traffic_sign/100999_83928.jpg b/deploy/images/PULC/traffic_sign/100999_83928.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f32ed5ae2d8483d29986e3a45db1789da2a4d43 Binary files /dev/null and b/deploy/images/PULC/traffic_sign/100999_83928.jpg differ diff --git a/deploy/images/PULC/traffic_sign/99603_17806.jpg b/deploy/images/PULC/traffic_sign/99603_17806.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c792fdf6eb64395fffaf8289a1ec14d47279860e Binary files /dev/null and b/deploy/images/PULC/traffic_sign/99603_17806.jpg differ diff --git a/deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg b/deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb5de9fc6ff99550bf9bff8d4a9f0d0e0fe18c06 Binary files /dev/null and b/deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg differ diff --git a/deploy/images/PULC/vehicle_attribute/0014_c012_00040750_0.jpg b/deploy/images/PULC/vehicle_attribute/0014_c012_00040750_0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76207d43ce597a1079c523dca0c32923bf15db19 Binary files /dev/null and b/deploy/images/PULC/vehicle_attribute/0014_c012_00040750_0.jpg differ diff --git a/deploy/images/Pedestrain_Attr.jpg b/deploy/images/Pedestrain_Attr.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a87e856af8c17a3b93617b93ea517b91c508619 Binary files /dev/null and b/deploy/images/Pedestrain_Attr.jpg differ diff --git a/deploy/lite_shitu/README.md b/deploy/lite_shitu/README.md index 52871c3c16dc9990f9cf23de24b24cb54067cac6..e2a03caedd0d4bf63af96d3541d1a8d021206e52 100644 --- a/deploy/lite_shitu/README.md +++ b/deploy/lite_shitu/README.md @@ -92,9 +92,9 @@ PaddleClas 提供了转换并优化后的推理模型,可以直接参考下方 ```shell # 进入lite_ppshitu目录 cd $PaddleClas/deploy/lite_shitu -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/lite/ppshitu_lite_models_v1.1.tar -tar -xf ppshitu_lite_models_v1.1.tar -rm -f ppshitu_lite_models_v1.1.tar +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/lite/ppshitu_lite_models_v1.2.tar +tar -xf ppshitu_lite_models_v1.2.tar +rm -f ppshitu_lite_models_v1.2.tar ``` #### 2.1.2 使用其他模型 @@ -162,7 +162,7 @@ git clone https://github.com/PaddlePaddle/PaddleDetection.git # 进入PaddleDetection根目录 cd PaddleDetection # 将预训练模型导出为inference模型 -python tools/export_model.py -c configs/picodet/application/mainbody_detection/picodet_lcnet_x2_5_640_mainbody.yml -o weights=https://paddledet.bj.bcebos.com/models/picodet_lcnet_x2_5_640_mainbody.pdparams --output_dir=inference +python tools/export_model.py -c configs/picodet/application/mainbody_detection/picodet_lcnet_x2_5_640_mainbody.yml -o weights=https://paddledet.bj.bcebos.com/models/picodet_lcnet_x2_5_640_mainbody.pdparams export_post_process=False --output_dir=inference # 将inference模型转化为Paddle-Lite优化模型 
paddle_lite_opt --model_file=inference/picodet_lcnet_x2_5_640_mainbody/model.pdmodel --param_file=inference/picodet_lcnet_x2_5_640_mainbody/model.pdiparams --optimize_out=inference/picodet_lcnet_x2_5_640_mainbody/mainbody_det # 将转好的模型复制到lite_shitu目录下 @@ -183,24 +183,56 @@ cd deploy/lite_shitu **注意**:`--optimize_out` 参数为优化后模型的保存路径,无需加后缀`.nb`;`--model_file` 参数为模型结构信息文件的路径,`--param_file` 参数为模型权重信息文件的路径,请注意文件名。 -### 2.2 将yaml文件转换成json文件 +### 2.2 生成新的检索库 + +由于lite 版本的检索库用的是`faiss1.5.3`版本,与新版本不兼容,因此需要重新生成index库 + +#### 2.2.1 数据及环境配置 + +```shell +# 进入上级目录 +cd .. +# 下载瓶装饮料数据集 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar && tar -xf drink_dataset_v1.0.tar +rm -rf drink_dataset_v1.0.tar +rm -rf drink_dataset_v1.0/index + +# 安装1.5.3版本的faiss +pip install faiss-cpu==1.5.3 + +# 下载通用识别模型,可替换成自己的inference model +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar +tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar +rm -rf general_PPLCNet_x2_5_lite_v1.0_infer.tar +``` + +#### 2.2.2 生成新的index文件 + +```shell +# 生成新的index库,注意指定好识别模型的路径,同时将index_mothod修改成Flat,HNSW32和IVF在此版本中可能存在bug,请慎重使用。 +# 如果使用自己的识别模型,对应的修改inference model的目录 +python python/build_gallery.py -c configs/inference_drink.yaml -o Global.rec_inference_model_dir=general_PPLCNet_x2_5_lite_v1.0_infer -o IndexProcess.index_method=Flat + +# 进入到lite_shitu目录 +cd lite_shitu +mv ../drink_dataset_v1.0 . +``` + +### 2.3 将yaml文件转换成json文件 ```shell # 如果测试单张图像 -python generate_json_config.py --det_model_path ppshitu_lite_models_v1.1/mainbody_PPLCNet_x2_5_640_quant_v1.1_lite.nb --rec_model_path ppshitu_lite_models_v1.1/general_PPLCNet_x2_5_lite_v1.1_infer.nb --img_path images/demo.jpg +python generate_json_config.py --det_model_path ppshitu_lite_models_v1.2/mainbody_PPLCNet_x2_5_640_v1.2_lite.nb --rec_model_path ppshitu_lite_models_v1.2/general_PPLCNet_x2_5_lite_v1.2_infer.nb --img_path images/demo.jpeg # or # 如果测试多张图像 -python generate_json_config.py --det_model_path ppshitu_lite_models_v1.1/mainbody_PPLCNet_x2_5_640_quant_v1.1_lite.nb --rec_model_path ppshitu_lite_models_v1.1/general_PPLCNet_x2_5_lite_v1.1_infer.nb --img_dir images +python generate_json_config.py --det_model_path ppshitu_lite_models_v1.2/mainbody_PPLCNet_x2_5_640_v1.2_lite.nb --rec_model_path ppshitu_lite_models_v1.2/general_PPLCNet_x2_5_lite_v1.2_infer.nb --img_dir images # 执行完成后,会在lit_shitu下生成shitu_config.json配置文件 ``` -### 2.3 index字典转换 +### 2.4 index字典转换 由于python的检索库字典,使用`pickle`进行的序列化存储,导致C++不方便读取,因此需要进行转换 ```shell -# 下载瓶装饮料数据集 -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar && tar -xf drink_dataset_v1.0.tar -rm -rf drink_dataset_v1.0.tar # 转化id_map.pkl为id_map.txt python transform_id_map.py -c ../configs/inference_drink.yaml @@ -208,7 +240,7 @@ python transform_id_map.py -c ../configs/inference_drink.yaml 转换成功后,会在`IndexProcess.index_dir`目录下生成`id_map.txt`。 -### 2.4 与手机联调 +### 2.5 与手机联调 首先需要进行一些准备工作。 1. 
准备一台arm8的安卓手机,如果编译的预测库是armv7,则需要arm7的手机,并修改Makefile中`ARM_ABI=arm7`。 @@ -308,8 +340,9 @@ chmod 777 pp_shitu 运行效果如下: ``` -images/demo.jpg: - result0: bbox[253, 275, 1146, 872], score: 0.974196, label: 伊藤园_果蔬汁 +images/demo.jpeg: + result0: bbox[344, 98, 527, 593], score: 0.811656, label: 红牛-强化型 + result1: bbox[0, 0, 600, 600], score: 0.729664, label: 红牛-强化型 ``` ## FAQ diff --git a/deploy/lite_shitu/generate_json_config.py b/deploy/lite_shitu/generate_json_config.py index 37d06c47e686daf5335dbbf1a193658c4ac20ac3..642dfcd9d6a46e2894ec0f01f0914a5347bc8d72 100644 --- a/deploy/lite_shitu/generate_json_config.py +++ b/deploy/lite_shitu/generate_json_config.py @@ -95,7 +95,7 @@ def main(): config_json["Global"]["det_model_path"] = args.det_model_path config_json["Global"]["rec_model_path"] = args.rec_model_path config_json["Global"]["rec_label_path"] = args.rec_label_path - config_json["Global"]["label_list"] = config_yaml["Global"]["labe_list"] + config_json["Global"]["label_list"] = config_yaml["Global"]["label_list"] config_json["Global"]["rec_nms_thresold"] = config_yaml["Global"][ "rec_nms_thresold"] config_json["Global"]["max_det_results"] = config_yaml["Global"][ diff --git a/deploy/lite_shitu/images/demo.jpeg b/deploy/lite_shitu/images/demo.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..2ef10aae5f7f5ce515cb51f857b66c6195f6664b Binary files /dev/null and b/deploy/lite_shitu/images/demo.jpeg differ diff --git a/deploy/lite_shitu/images/demo.jpg b/deploy/lite_shitu/images/demo.jpg deleted file mode 100644 index 075dc31d4e6b407b792cc8abca82dcd541be8d11..0000000000000000000000000000000000000000 Binary files a/deploy/lite_shitu/images/demo.jpg and /dev/null differ diff --git a/deploy/lite_shitu/include/config_parser.h b/deploy/lite_shitu/include/config_parser.h index dca0e5a6898a219932ee978591d89c7624988f3f..2bed92059c41be344b863d5b8a81f9367dfa48fc 100644 --- a/deploy/lite_shitu/include/config_parser.h +++ b/deploy/lite_shitu/include/config_parser.h @@ -29,16 +29,16 @@ namespace PPShiTu { -void load_jsonf(std::string jsonfile, Json::Value& jsondata); +void load_jsonf(std::string jsonfile, Json::Value &jsondata); // Inference model configuration parser class ConfigPaser { - public: +public: ConfigPaser() {} ~ConfigPaser() {} - bool load_config(const Json::Value& config) { + bool load_config(const Json::Value &config) { // Get model arch : YOLO, SSD, RetinaNet, RCNN, Face if (config["Global"].isMember("det_arch")) { @@ -89,4 +89,4 @@ class ConfigPaser { std::vector fpn_stride_; }; -} // namespace PPShiTu +} // namespace PPShiTu diff --git a/deploy/lite_shitu/include/feature_extractor.h b/deploy/lite_shitu/include/feature_extractor.h index 1961459ecfab149695890df60cef550ed5177b52..6ef5cae13fe8b9a3724d88a30e562f5d091efc89 100644 --- a/deploy/lite_shitu/include/feature_extractor.h +++ b/deploy/lite_shitu/include/feature_extractor.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -48,10 +49,6 @@ public: config_file["Global"]["rec_model_path"].as()); this->predictor = CreatePaddlePredictor(config); - if (config_file["Global"]["rec_label_path"].as().empty()) { - std::cout << "Please set [rec_label_path] in config file" << std::endl; - exit(-1); - } SetPreProcessParam(config_file["RecPreProcess"]["transform_ops"]); printf("feature extract model create!\n"); } @@ -68,24 +65,29 @@ public: this->mean.emplace_back(tmp.as()); } for (auto tmp : item["std"]) { - this->std.emplace_back(1 / tmp.as()); + this->std.emplace_back(tmp.as()); } this->scale = 
item["scale"].as(); } } } - void RunRecModel(const cv::Mat &img, double &cost_time, std::vector &feature); - //void PostProcess(std::vector &feature); - cv::Mat ResizeImage(const cv::Mat &img); - void NeonMeanScale(const float *din, float *dout, int size); + void RunRecModel(const cv::Mat &img, double &cost_time, + std::vector &feature); + // void PostProcess(std::vector &feature); + void FeatureNorm(std::vector &featuer); private: std::shared_ptr predictor; - //std::vector label_list; + // std::vector label_list; std::vector mean = {0.485f, 0.456f, 0.406f}; - std::vector std = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f}; + std::vector std = {0.229f, 0.224f, 0.225f}; double scale = 0.00392157; - float size = 224; + int size = 224; + + // pre-process + Resize resize_op_; + NormalizeImage normalize_op_; + Permute permute_op_; }; } // namespace PPShiTu diff --git a/deploy/lite_shitu/include/object_detector.h b/deploy/lite_shitu/include/object_detector.h index 779cc89d197ece9115788ac25b9fa5d4c862e2fe..83212d7ea3ec2a93cf410c5930951709ab6d031d 100644 --- a/deploy/lite_shitu/include/object_detector.h +++ b/deploy/lite_shitu/include/object_detector.h @@ -16,24 +16,24 @@ #include #include +#include #include #include #include -#include +#include "json/json.h" #include #include #include -#include "json/json.h" -#include "paddle_api.h" // NOLINT +#include "paddle_api.h" // NOLINT #include "include/config_parser.h" +#include "include/picodet_postprocess.h" #include "include/preprocess_op.h" #include "include/utils.h" -#include "include/picodet_postprocess.h" -using namespace paddle::lite_api; // NOLINT +using namespace paddle::lite_api; // NOLINT namespace PPShiTu { @@ -41,53 +41,51 @@ namespace PPShiTu { std::vector GenerateColorMap(int num_class); // Visualiztion Detection Result -cv::Mat VisualizeResult(const cv::Mat& img, - const std::vector& results, - const std::vector& lables, - const std::vector& colormap, - const bool is_rbox); +cv::Mat VisualizeResult(const cv::Mat &img, + const std::vector &results, + const std::vector &lables, + const std::vector &colormap, const bool is_rbox); class ObjectDetector { - public: - explicit ObjectDetector(const Json::Value& config, - const std::string& model_dir, - int cpu_threads = 1, +public: + explicit ObjectDetector(const Json::Value &config, + const std::string &model_dir, int cpu_threads = 1, const int batch_size = 1) { config_.load_config(config); printf("config created\n"); preprocessor_.Init(config_.preprocess_info_); printf("before object detector\n"); - if(config["Global"]["det_model_path"].as().empty()){ - std::cout << "Please set [det_model_path] in config file" << std::endl; - exit(-1); + if (config["Global"]["det_model_path"].as().empty()) { + std::cout << "Please set [det_model_path] in config file" << std::endl; + exit(-1); } - LoadModel(config["Global"]["det_model_path"].as(), cpu_threads); - printf("create object detector\n"); } + LoadModel(config["Global"]["det_model_path"].as(), + cpu_threads); + printf("create object detector\n"); + } // Load Paddle inference model void LoadModel(std::string model_file, int num_theads); // Run predictor - void Predict(const std::vector& imgs, - const int warmup = 0, + void Predict(const std::vector &imgs, const int warmup = 0, const int repeats = 1, - std::vector* result = nullptr, - std::vector* bbox_num = nullptr, - std::vector* times = nullptr); + std::vector *result = nullptr, + std::vector *bbox_num = nullptr, + std::vector *times = nullptr); // Get Model Label list - const std::vector& GetLabelList() const 
{ + const std::vector &GetLabelList() const { return config_.label_list_; } - private: +private: // Preprocess image and copy data to input buffer - void Preprocess(const cv::Mat& image_mat); + void Preprocess(const cv::Mat &image_mat); // Postprocess result void Postprocess(const std::vector mats, - std::vector* result, - std::vector bbox_num, - bool is_rbox); + std::vector *result, + std::vector bbox_num, bool is_rbox); std::shared_ptr predictor_; Preprocessor preprocessor_; @@ -96,7 +94,6 @@ class ObjectDetector { std::vector out_bbox_num_data_; float threshold_; ConfigPaser config_; - }; -} // namespace PPShiTu +} // namespace PPShiTu diff --git a/deploy/lite_shitu/include/picodet_postprocess.h b/deploy/lite_shitu/include/picodet_postprocess.h index 758795b5fbfba459ce6b7ea2d24c04c40eddd543..72dfd1b88af8296d81b56cbbc220dac61e8fa2b9 100644 --- a/deploy/lite_shitu/include/picodet_postprocess.h +++ b/deploy/lite_shitu/include/picodet_postprocess.h @@ -14,25 +14,23 @@ #pragma once -#include -#include -#include -#include #include +#include #include +#include +#include +#include #include "include/utils.h" namespace PPShiTu { -void PicoDetPostProcess(std::vector* results, - std::vector outs, - std::vector fpn_stride, - std::vector im_shape, - std::vector scale_factor, - float score_threshold = 0.3, - float nms_threshold = 0.5, - int num_class = 80, - int reg_max = 7); +void PicoDetPostProcess(std::vector *results, + std::vector outs, + std::vector fpn_stride, + std::vector im_shape, + std::vector scale_factor, + float score_threshold = 0.3, float nms_threshold = 0.5, + int num_class = 80, int reg_max = 7); -} // namespace PPShiTu +} // namespace PPShiTu diff --git a/deploy/lite_shitu/include/preprocess_op.h b/deploy/lite_shitu/include/preprocess_op.h index f7050fa86951bfe80aa4030adabc11ff43f82371..b7ed5e878d77c6aae15e399e57f8384e7a6d19fb 100644 --- a/deploy/lite_shitu/include/preprocess_op.h +++ b/deploy/lite_shitu/include/preprocess_op.h @@ -21,16 +21,16 @@ #include #include +#include "json/json.h" #include #include #include -#include "json/json.h" namespace PPShiTu { // Object for storing all preprocessed data class ImageBlob { - public: +public: // image width and height std::vector im_shape_; // Buffer for image data after preprocessing @@ -45,20 +45,20 @@ class ImageBlob { // Abstraction of preprocessing opration class class PreprocessOp { - public: - virtual void Init(const Json::Value& item) = 0; - virtual void Run(cv::Mat* im, ImageBlob* data) = 0; +public: + virtual void Init(const Json::Value &item) = 0; + virtual void Run(cv::Mat *im, ImageBlob *data) = 0; }; class InitInfo : public PreprocessOp { - public: - virtual void Init(const Json::Value& item) {} - virtual void Run(cv::Mat* im, ImageBlob* data); +public: + virtual void Init(const Json::Value &item) {} + virtual void Run(cv::Mat *im, ImageBlob *data); }; class NormalizeImage : public PreprocessOp { - public: - virtual void Init(const Json::Value& item) { +public: + virtual void Init(const Json::Value &item) { mean_.clear(); scale_.clear(); for (auto tmp : item["mean"]) { @@ -70,9 +70,11 @@ class NormalizeImage : public PreprocessOp { is_scale_ = item["is_scale"].as(); } - virtual void Run(cv::Mat* im, ImageBlob* data); + virtual void Run(cv::Mat *im, ImageBlob *data); + void Run_feature(cv::Mat *im, const std::vector &mean, + const std::vector &std, float scale); - private: +private: // CHW or HWC std::vector mean_; std::vector scale_; @@ -80,14 +82,15 @@ class NormalizeImage : public PreprocessOp { }; class Permute : public 
PreprocessOp { - public: - virtual void Init(const Json::Value& item) {} - virtual void Run(cv::Mat* im, ImageBlob* data); +public: + virtual void Init(const Json::Value &item) {} + virtual void Run(cv::Mat *im, ImageBlob *data); + void Run_feature(const cv::Mat *im, float *data); }; class Resize : public PreprocessOp { - public: - virtual void Init(const Json::Value& item) { +public: + virtual void Init(const Json::Value &item) { interp_ = item["interp"].as(); // max_size_ = item["target_size"].as(); keep_ratio_ = item["keep_ratio"].as(); @@ -98,11 +101,13 @@ class Resize : public PreprocessOp { } // Compute best resize scale for x-dimension, y-dimension - std::pair GenerateScale(const cv::Mat& im); + std::pair GenerateScale(const cv::Mat &im); - virtual void Run(cv::Mat* im, ImageBlob* data); + virtual void Run(cv::Mat *im, ImageBlob *data); + void Run_feature(const cv::Mat &img, cv::Mat &resize_img, int max_size_len, + int size = 0); - private: +private: int interp_; bool keep_ratio_; std::vector target_size_; @@ -111,46 +116,43 @@ class Resize : public PreprocessOp { // Models with FPN need input shape % stride == 0 class PadStride : public PreprocessOp { - public: - virtual void Init(const Json::Value& item) { +public: + virtual void Init(const Json::Value &item) { stride_ = item["stride"].as(); } - virtual void Run(cv::Mat* im, ImageBlob* data); + virtual void Run(cv::Mat *im, ImageBlob *data); - private: +private: int stride_; }; class TopDownEvalAffine : public PreprocessOp { - public: - virtual void Init(const Json::Value& item) { +public: + virtual void Init(const Json::Value &item) { trainsize_.clear(); for (auto tmp : item["trainsize"]) { trainsize_.emplace_back(tmp.as()); } } - virtual void Run(cv::Mat* im, ImageBlob* data); + virtual void Run(cv::Mat *im, ImageBlob *data); - private: +private: int interp_ = 1; std::vector trainsize_; }; -void CropImg(cv::Mat& img, - cv::Mat& crop_img, - std::vector& area, - std::vector& center, - std::vector& scale, +void CropImg(cv::Mat &img, cv::Mat &crop_img, std::vector &area, + std::vector ¢er, std::vector &scale, float expandratio = 0.15); class Preprocessor { - public: - void Init(const Json::Value& config_node) { +public: + void Init(const Json::Value &config_node) { // initialize image info at first ops_["InitInfo"] = std::make_shared(); - for (const auto& item : config_node) { + for (const auto &item : config_node) { auto op_name = item["type"].as(); ops_[op_name] = CreateOp(op_name); @@ -158,7 +160,7 @@ class Preprocessor { } } - std::shared_ptr CreateOp(const std::string& name) { + std::shared_ptr CreateOp(const std::string &name) { if (name == "DetResize") { return std::make_shared(); } else if (name == "DetPermute") { @@ -176,13 +178,13 @@ class Preprocessor { return nullptr; } - void Run(cv::Mat* im, ImageBlob* data); + void Run(cv::Mat *im, ImageBlob *data); - public: +public: static const std::vector RUN_ORDER; - private: +private: std::unordered_map> ops_; }; -} // namespace PPShiTu +} // namespace PPShiTu diff --git a/deploy/lite_shitu/include/utils.h b/deploy/lite_shitu/include/utils.h index a3b57c882561577defff97e384fb775b78204f36..b23cae31898f92cb55cf240ecc0bb544dba6bb05 100644 --- a/deploy/lite_shitu/include/utils.h +++ b/deploy/lite_shitu/include/utils.h @@ -38,6 +38,23 @@ struct ObjectResult { std::vector rec_result; }; -void nms(std::vector &input_boxes, float nms_threshold, bool rec_nms=false); +void nms(std::vector &input_boxes, float nms_threshold, + bool rec_nms = false); +template +static inline bool 
SortScorePairDescend(const std::pair &pair1, + const std::pair &pair2) { + return pair1.first > pair2.first; +} + +float RectOverlap(const ObjectResult &a, const ObjectResult &b); + +inline void +GetMaxScoreIndex(const std::vector &det_result, + const float threshold, + std::vector> &score_index_vec); + +void NMSBoxes(const std::vector det_result, + const float score_threshold, const float nms_threshold, + std::vector &indices); } // namespace PPShiTu diff --git a/deploy/lite_shitu/include/vector_search.h b/deploy/lite_shitu/include/vector_search.h index 89ef7733ab86c534a5c507cb4f87c9d4597dba15..49c95cc35edf9248d56b7a3a660285698cd6df8b 100644 --- a/deploy/lite_shitu/include/vector_search.h +++ b/deploy/lite_shitu/include/vector_search.h @@ -70,4 +70,4 @@ private: std::vector I; SearchResult sr; }; -} +} // namespace PPShiTu diff --git a/deploy/lite_shitu/src/config_parser.cc b/deploy/lite_shitu/src/config_parser.cc index d98b2f90f0a860189b8b3b12e9ffd5646dae1d24..09f09f782c93cdfc6fd5d41b97a630cbbafa5917 100644 --- a/deploy/lite_shitu/src/config_parser.cc +++ b/deploy/lite_shitu/src/config_parser.cc @@ -29,4 +29,4 @@ void load_jsonf(std::string jsonfile, Json::Value &jsondata) { } } -} // namespace PPShiTu +} // namespace PPShiTu diff --git a/deploy/lite_shitu/src/feature_extractor.cc b/deploy/lite_shitu/src/feature_extractor.cc index aca5c1cbbe5c70cd214c922609831e9350be28a0..67940f011eb9399aadc0aa5f38ad8d8dde197aa0 100644 --- a/deploy/lite_shitu/src/feature_extractor.cc +++ b/deploy/lite_shitu/src/feature_extractor.cc @@ -13,24 +13,29 @@ // limitations under the License. #include "include/feature_extractor.h" +#include +#include namespace PPShiTu { -void FeatureExtract::RunRecModel(const cv::Mat &img, - double &cost_time, +void FeatureExtract::RunRecModel(const cv::Mat &img, double &cost_time, std::vector &feature) { // Read img - cv::Mat resize_image = ResizeImage(img); - cv::Mat img_fp; - resize_image.convertTo(img_fp, CV_32FC3, scale); + this->resize_op_.Run_feature(img, img_fp, this->size, this->size); + this->normalize_op_.Run_feature(&img_fp, this->mean, this->std, this->scale); + std::vector input(1 * 3 * img_fp.rows * img_fp.cols, 0.0f); + this->permute_op_.Run_feature(&img_fp, input.data()); // Prepare input data from image std::unique_ptr input_tensor(std::move(this->predictor->GetInput(0))); - input_tensor->Resize({1, 3, img_fp.rows, img_fp.cols}); + input_tensor->Resize({1, 3, this->size, this->size}); auto *data0 = input_tensor->mutable_data(); - const float *dimg = reinterpret_cast(img_fp.data); - NeonMeanScale(dimg, data0, img_fp.rows * img_fp.cols); + // const float *dimg = reinterpret_cast(img_fp.data); + // NeonMeanScale(dimg, data0, img_fp.rows * img_fp.cols); + for (int i = 0; i < input.size(); ++i) { + data0[i] = input[i]; + } auto start = std::chrono::system_clock::now(); // Run predictor @@ -38,7 +43,7 @@ void FeatureExtract::RunRecModel(const cv::Mat &img, // Get output and post process std::unique_ptr output_tensor( - std::move(this->predictor->GetOutput(0))); //only one output + std::move(this->predictor->GetOutput(0))); // only one output auto end = std::chrono::system_clock::now(); auto duration = std::chrono::duration_cast(end - start); @@ -46,7 +51,7 @@ void FeatureExtract::RunRecModel(const cv::Mat &img, std::chrono::microseconds::period::num / std::chrono::microseconds::period::den; - //do postprocess + // do postprocess int output_size = 1; for (auto dim : output_tensor->shape()) { output_size *= dim; @@ -54,63 +59,15 @@ void 
FeatureExtract::RunRecModel(const cv::Mat &img, feature.resize(output_size); output_tensor->CopyToCpu(feature.data()); - //postprocess include sqrt or binarize. - //PostProcess(feature); + // postprocess include sqrt or binarize. + FeatureNorm(feature); return; } -// void FeatureExtract::PostProcess(std::vector &feature){ -// float feature_sqrt = std::sqrt(std::inner_product( -// feature.begin(), feature.end(), feature.begin(), 0.0f)); -// for (int i = 0; i < feature.size(); ++i) -// feature[i] /= feature_sqrt; -// } - -void FeatureExtract::NeonMeanScale(const float *din, float *dout, int size) { - - if (this->mean.size() != 3 || this->std.size() != 3) { - std::cerr << "[ERROR] mean or scale size must equal to 3\n"; - exit(1); - } - float32x4_t vmean0 = vdupq_n_f32(mean[0]); - float32x4_t vmean1 = vdupq_n_f32(mean[1]); - float32x4_t vmean2 = vdupq_n_f32(mean[2]); - float32x4_t vscale0 = vdupq_n_f32(std[0]); - float32x4_t vscale1 = vdupq_n_f32(std[1]); - float32x4_t vscale2 = vdupq_n_f32(std[2]); - - float *dout_c0 = dout; - float *dout_c1 = dout + size; - float *dout_c2 = dout + size * 2; - - int i = 0; - for (; i < size - 3; i += 4) { - float32x4x3_t vin3 = vld3q_f32(din); - float32x4_t vsub0 = vsubq_f32(vin3.val[0], vmean0); - float32x4_t vsub1 = vsubq_f32(vin3.val[1], vmean1); - float32x4_t vsub2 = vsubq_f32(vin3.val[2], vmean2); - float32x4_t vs0 = vmulq_f32(vsub0, vscale0); - float32x4_t vs1 = vmulq_f32(vsub1, vscale1); - float32x4_t vs2 = vmulq_f32(vsub2, vscale2); - vst1q_f32(dout_c0, vs0); - vst1q_f32(dout_c1, vs1); - vst1q_f32(dout_c2, vs2); - - din += 12; - dout_c0 += 4; - dout_c1 += 4; - dout_c2 += 4; - } - for (; i < size; i++) { - *(dout_c0++) = (*(din++) - this->mean[0]) * this->std[0]; - *(dout_c1++) = (*(din++) - this->mean[1]) * this->std[1]; - *(dout_c2++) = (*(din++) - this->mean[2]) * this->std[2]; - } -} - -cv::Mat FeatureExtract::ResizeImage(const cv::Mat &img) { - cv::Mat resize_img; - cv::resize(img, resize_img, cv::Size(this->size, this->size)); - return resize_img; -} +void FeatureExtract::FeatureNorm(std::vector &feature) { + float feature_sqrt = std::sqrt(std::inner_product( + feature.begin(), feature.end(), feature.begin(), 0.0f)); + for (int i = 0; i < feature.size(); ++i) + feature[i] /= feature_sqrt; } +} // namespace PPShiTu diff --git a/deploy/lite_shitu/src/main.cc b/deploy/lite_shitu/src/main.cc index 3f278dc778701a7a7591e74336e0f86fe52105ea..fb516c297d83c438b1b5df88732ad386377c781f 100644 --- a/deploy/lite_shitu/src/main.cc +++ b/deploy/lite_shitu/src/main.cc @@ -27,6 +27,7 @@ #include "include/feature_extractor.h" #include "include/object_detector.h" #include "include/preprocess_op.h" +#include "include/utils.h" #include "include/vector_search.h" #include "json/json.h" @@ -158,6 +159,11 @@ int main(int argc, char **argv) { << " [image_dir]>" << std::endl; return -1; } + + float rec_nms_threshold = 0.05; + if (RT_Config["Global"]["rec_nms_thresold"].isDouble()) + rec_nms_threshold = RT_Config["Global"]["rec_nms_thresold"].as(); + // Load model and create a object detector PPShiTu::ObjectDetector det( RT_Config, RT_Config["Global"]["det_model_path"].as(), @@ -174,6 +180,7 @@ int main(int argc, char **argv) { // for vector search std::vector feature; std::vector features; + std::vector indeices; double rec_time; if (!RT_Config["Global"]["infer_imgs"].as().empty() || !img_dir.empty()) { @@ -208,9 +215,9 @@ int main(int argc, char **argv) { RT_Config["Global"]["max_det_results"].as(), false, &det); // add the whole image for recognition to improve recall 
-// PPShiTu::ObjectResult result_whole_img = { -// {0, 0, srcimg.cols, srcimg.rows}, 0, 1.0}; -// det_result.push_back(result_whole_img); + PPShiTu::ObjectResult result_whole_img = { + {0, 0, srcimg.cols, srcimg.rows}, 0, 1.0}; + det_result.push_back(result_whole_img); // get rec result PPShiTu::SearchResult search_result; @@ -225,10 +232,18 @@ int main(int argc, char **argv) { // do vectore search search_result = searcher.Search(features.data(), det_result.size()); + for (int i = 0; i < det_result.size(); ++i) { + det_result[i].confidence = search_result.D[search_result.return_k * i]; + } + NMSBoxes(det_result, searcher.GetThreshold(), rec_nms_threshold, + indeices); PrintResult(img_path, det_result, searcher, search_result); batch_imgs.clear(); det_result.clear(); + features.clear(); + feature.clear(); + indeices.clear(); } } return 0; diff --git a/deploy/lite_shitu/src/object_detector.cc b/deploy/lite_shitu/src/object_detector.cc index ffea31bb9d76b1dd90eed2a90cd066b0edb20057..18388f7a5b0a4fd7b63c37269bd4eea81aad6db1 100644 --- a/deploy/lite_shitu/src/object_detector.cc +++ b/deploy/lite_shitu/src/object_detector.cc @@ -13,9 +13,9 @@ // limitations under the License. #include // for setprecision +#include "include/object_detector.h" #include #include -#include "include/object_detector.h" namespace PPShiTu { @@ -30,10 +30,10 @@ void ObjectDetector::LoadModel(std::string model_file, int num_theads) { } // Visualiztion MaskDetector results -cv::Mat VisualizeResult(const cv::Mat& img, - const std::vector& results, - const std::vector& lables, - const std::vector& colormap, +cv::Mat VisualizeResult(const cv::Mat &img, + const std::vector &results, + const std::vector &lables, + const std::vector &colormap, const bool is_rbox = false) { cv::Mat vis_img = img.clone(); for (int i = 0; i < results.size(); ++i) { @@ -75,24 +75,18 @@ cv::Mat VisualizeResult(const cv::Mat& img, origin.y = results[i].rect[1]; // Configure text background - cv::Rect text_back = cv::Rect(results[i].rect[0], - results[i].rect[1] - text_size.height, - text_size.width, - text_size.height); + cv::Rect text_back = + cv::Rect(results[i].rect[0], results[i].rect[1] - text_size.height, + text_size.width, text_size.height); // Draw text, and background cv::rectangle(vis_img, text_back, roi_color, -1); - cv::putText(vis_img, - text, - origin, - font_face, - font_scale, - cv::Scalar(255, 255, 255), - thickness); + cv::putText(vis_img, text, origin, font_face, font_scale, + cv::Scalar(255, 255, 255), thickness); } return vis_img; } -void ObjectDetector::Preprocess(const cv::Mat& ori_im) { +void ObjectDetector::Preprocess(const cv::Mat &ori_im) { // Clone the image : keep the original mat for postprocess cv::Mat im = ori_im.clone(); // cv::cvtColor(im, im, cv::COLOR_BGR2RGB); @@ -100,7 +94,7 @@ void ObjectDetector::Preprocess(const cv::Mat& ori_im) { } void ObjectDetector::Postprocess(const std::vector mats, - std::vector* result, + std::vector *result, std::vector bbox_num, bool is_rbox = false) { result->clear(); @@ -156,12 +150,11 @@ void ObjectDetector::Postprocess(const std::vector mats, } } -void ObjectDetector::Predict(const std::vector& imgs, - const int warmup, +void ObjectDetector::Predict(const std::vector &imgs, const int warmup, const int repeats, - std::vector* result, - std::vector* bbox_num, - std::vector* times) { + std::vector *result, + std::vector *bbox_num, + std::vector *times) { auto preprocess_start = std::chrono::steady_clock::now(); int batch_size = imgs.size(); @@ -180,29 +173,29 @@ void 
ObjectDetector::Predict(const std::vector& imgs, scale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1]; // TODO: reduce cost time - in_data_all.insert( - in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end()); + in_data_all.insert(in_data_all.end(), inputs_.im_data_.begin(), + inputs_.im_data_.end()); } auto preprocess_end = std::chrono::steady_clock::now(); std::vector output_data_list_; // Prepare input tensor auto input_names = predictor_->GetInputNames(); - for (const auto& tensor_name : input_names) { + for (const auto &tensor_name : input_names) { auto in_tensor = predictor_->GetInputByName(tensor_name); if (tensor_name == "image") { int rh = inputs_.in_net_shape_[0]; int rw = inputs_.in_net_shape_[1]; in_tensor->Resize({batch_size, 3, rh, rw}); - auto* inptr = in_tensor->mutable_data(); + auto *inptr = in_tensor->mutable_data(); std::copy_n(in_data_all.data(), in_data_all.size(), inptr); } else if (tensor_name == "im_shape") { in_tensor->Resize({batch_size, 2}); - auto* inptr = in_tensor->mutable_data(); + auto *inptr = in_tensor->mutable_data(); std::copy_n(im_shape_all.data(), im_shape_all.size(), inptr); } else if (tensor_name == "scale_factor") { in_tensor->Resize({batch_size, 2}); - auto* inptr = in_tensor->mutable_data(); + auto *inptr = in_tensor->mutable_data(); std::copy_n(scale_factor_all.data(), scale_factor_all.size(), inptr); } } @@ -216,7 +209,7 @@ void ObjectDetector::Predict(const std::vector& imgs, if (config_.arch_ == "PicoDet") { for (int j = 0; j < output_names.size(); j++) { auto output_tensor = predictor_->GetTensor(output_names[j]); - const float* outptr = output_tensor->data(); + const float *outptr = output_tensor->data(); std::vector output_shape = output_tensor->shape(); output_data_list_.push_back(outptr); } @@ -242,7 +235,7 @@ void ObjectDetector::Predict(const std::vector& imgs, if (config_.arch_ == "PicoDet") { for (int i = 0; i < output_names.size(); i++) { auto output_tensor = predictor_->GetTensor(output_names[i]); - const float* outptr = output_tensor->data(); + const float *outptr = output_tensor->data(); std::vector output_shape = output_tensor->shape(); if (i == 0) { num_class = output_shape[2]; @@ -268,16 +261,15 @@ void ObjectDetector::Predict(const std::vector& imgs, std::cerr << "[WARNING] No object detected." 
<< std::endl; } output_data_.resize(output_size); - std::copy_n( - output_tensor->mutable_data(), output_size, output_data_.data()); + std::copy_n(output_tensor->mutable_data(), output_size, + output_data_.data()); int out_bbox_num_size = 1; for (int j = 0; j < out_bbox_num_shape.size(); ++j) { out_bbox_num_size *= out_bbox_num_shape[j]; } out_bbox_num_data_.resize(out_bbox_num_size); - std::copy_n(out_bbox_num->mutable_data(), - out_bbox_num_size, + std::copy_n(out_bbox_num->mutable_data(), out_bbox_num_size, out_bbox_num_data_.data()); } // Postprocessing result @@ -285,9 +277,8 @@ void ObjectDetector::Predict(const std::vector& imgs, result->clear(); if (config_.arch_ == "PicoDet") { PPShiTu::PicoDetPostProcess( - result, output_data_list_, config_.fpn_stride_, - inputs_.im_shape_, inputs_.scale_factor_, - config_.nms_info_["score_threshold"].as(), + result, output_data_list_, config_.fpn_stride_, inputs_.im_shape_, + inputs_.scale_factor_, config_.nms_info_["score_threshold"].as(), config_.nms_info_["nms_threshold"].as(), num_class, reg_max); bbox_num->push_back(result->size()); } else { @@ -326,4 +317,4 @@ std::vector GenerateColorMap(int num_class) { return colormap; } -} // namespace PPShiTu +} // namespace PPShiTu diff --git a/deploy/lite_shitu/src/picodet_postprocess.cc b/deploy/lite_shitu/src/picodet_postprocess.cc index cde914c26db21813b1a52137385fa1509cb825f7..04054efa752ca2c2c1ffce504e0a0d48f259eff3 100644 --- a/deploy/lite_shitu/src/picodet_postprocess.cc +++ b/deploy/lite_shitu/src/picodet_postprocess.cc @@ -47,9 +47,9 @@ int activation_function_softmax(const _Tp *src, _Tp *dst, int length) { } // PicoDet decode -PPShiTu::ObjectResult -disPred2Bbox(const float *&dfl_det, int label, float score, int x, int y, - int stride, std::vector im_shape, int reg_max) { +PPShiTu::ObjectResult disPred2Bbox(const float *&dfl_det, int label, + float score, int x, int y, int stride, + std::vector im_shape, int reg_max) { float ct_x = (x + 0.5) * stride; float ct_y = (y + 0.5) * stride; std::vector dis_pred; diff --git a/deploy/lite_shitu/src/preprocess_op.cc b/deploy/lite_shitu/src/preprocess_op.cc index 9c74d6ee7241c93b9fb206317f634e523425793e..974dcbfc6366590c790314599ae3cbe446dafd86 100644 --- a/deploy/lite_shitu/src/preprocess_op.cc +++ b/deploy/lite_shitu/src/preprocess_op.cc @@ -20,7 +20,7 @@ namespace PPShiTu { -void InitInfo::Run(cv::Mat* im, ImageBlob* data) { +void InitInfo::Run(cv::Mat *im, ImageBlob *data) { data->im_shape_ = {static_cast(im->rows), static_cast(im->cols)}; data->scale_factor_ = {1., 1.}; @@ -28,10 +28,10 @@ void InitInfo::Run(cv::Mat* im, ImageBlob* data) { static_cast(im->cols)}; } -void NormalizeImage::Run(cv::Mat* im, ImageBlob* data) { +void NormalizeImage::Run(cv::Mat *im, ImageBlob *data) { double e = 1.0; if (is_scale_) { - e *= 1./255.0; + e *= 1. 
/ 255.0; } (*im).convertTo(*im, CV_32FC3, e); for (int h = 0; h < im->rows; h++) { @@ -46,35 +46,61 @@ void NormalizeImage::Run(cv::Mat* im, ImageBlob* data) { } } -void Permute::Run(cv::Mat* im, ImageBlob* data) { +void NormalizeImage::Run_feature(cv::Mat *im, const std::vector &mean, + const std::vector &std, float scale) { + (*im).convertTo(*im, CV_32FC3, scale); + for (int h = 0; h < im->rows; h++) { + for (int w = 0; w < im->cols; w++) { + im->at(h, w)[0] = + (im->at(h, w)[0] - mean[0]) / std[0]; + im->at(h, w)[1] = + (im->at(h, w)[1] - mean[1]) / std[1]; + im->at(h, w)[2] = + (im->at(h, w)[2] - mean[2]) / std[2]; + } + } +} + +void Permute::Run(cv::Mat *im, ImageBlob *data) { (*im).convertTo(*im, CV_32FC3); int rh = im->rows; int rw = im->cols; int rc = im->channels(); (data->im_data_).resize(rc * rh * rw); - float* base = (data->im_data_).data(); + float *base = (data->im_data_).data(); for (int i = 0; i < rc; ++i) { cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i); } } -void Resize::Run(cv::Mat* im, ImageBlob* data) { +void Permute::Run_feature(const cv::Mat *im, float *data) { + int rh = im->rows; + int rw = im->cols; + int rc = im->channels(); + for (int i = 0; i < rc; ++i) { + cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i); + } +} + +void Resize::Run(cv::Mat *im, ImageBlob *data) { auto resize_scale = GenerateScale(*im); data->im_shape_ = {static_cast(im->cols * resize_scale.first), static_cast(im->rows * resize_scale.second)}; data->in_net_shape_ = {static_cast(im->cols * resize_scale.first), static_cast(im->rows * resize_scale.second)}; - cv::resize( - *im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_); + cv::resize(*im, *im, cv::Size(), resize_scale.first, resize_scale.second, + interp_); data->im_shape_ = { - static_cast(im->rows), static_cast(im->cols), + static_cast(im->rows), + static_cast(im->cols), }; data->scale_factor_ = { - resize_scale.second, resize_scale.first, + resize_scale.second, + resize_scale.first, }; } -std::pair Resize::GenerateScale(const cv::Mat& im) { +std::pair Resize::GenerateScale(const cv::Mat &im) { std::pair resize_scale; int origin_w = im.cols; int origin_h = im.rows; @@ -101,7 +127,30 @@ std::pair Resize::GenerateScale(const cv::Mat& im) { return resize_scale; } -void PadStride::Run(cv::Mat* im, ImageBlob* data) { +void Resize::Run_feature(const cv::Mat &img, cv::Mat &resize_img, + int resize_short_size, int size) { + int resize_h = 0; + int resize_w = 0; + if (size > 0) { + resize_h = size; + resize_w = size; + } else { + int w = img.cols; + int h = img.rows; + + float ratio = 1.f; + if (h < w) { + ratio = float(resize_short_size) / float(h); + } else { + ratio = float(resize_short_size) / float(w); + } + resize_h = round(float(h) * ratio); + resize_w = round(float(w) * ratio); + } + cv::resize(img, resize_img, cv::Size(resize_w, resize_h)); +} + +void PadStride::Run(cv::Mat *im, ImageBlob *data) { if (stride_ <= 0) { return; } @@ -110,48 +159,44 @@ void PadStride::Run(cv::Mat* im, ImageBlob* data) { int rw = im->cols; int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_; int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_; - cv::copyMakeBorder( - *im, *im, 0, nh - rh, 0, nw - rw, cv::BORDER_CONSTANT, cv::Scalar(0)); + cv::copyMakeBorder(*im, *im, 0, nh - rh, 0, nw - rw, cv::BORDER_CONSTANT, + cv::Scalar(0)); data->in_net_shape_ = { - static_cast(im->rows), static_cast(im->cols), + static_cast(im->rows), + static_cast(im->cols), }; } -void 
TopDownEvalAffine::Run(cv::Mat* im, ImageBlob* data) { +void TopDownEvalAffine::Run(cv::Mat *im, ImageBlob *data) { cv::resize(*im, *im, cv::Size(trainsize_[0], trainsize_[1]), 0, 0, interp_); // todo: Simd::ResizeBilinear(); data->in_net_shape_ = { - static_cast(trainsize_[1]), static_cast(trainsize_[0]), + static_cast(trainsize_[1]), + static_cast(trainsize_[0]), }; } // Preprocessor op running order -const std::vector Preprocessor::RUN_ORDER = {"InitInfo", - "DetTopDownEvalAffine", - "DetResize", - "DetNormalizeImage", - "DetPadStride", - "DetPermute"}; - -void Preprocessor::Run(cv::Mat* im, ImageBlob* data) { - for (const auto& name : RUN_ORDER) { +const std::vector Preprocessor::RUN_ORDER = { + "InitInfo", "DetTopDownEvalAffine", "DetResize", + "DetNormalizeImage", "DetPadStride", "DetPermute"}; + +void Preprocessor::Run(cv::Mat *im, ImageBlob *data) { + for (const auto &name : RUN_ORDER) { if (ops_.find(name) != ops_.end()) { ops_[name]->Run(im, data); } } } -void CropImg(cv::Mat& img, - cv::Mat& crop_img, - std::vector& area, - std::vector& center, - std::vector& scale, +void CropImg(cv::Mat &img, cv::Mat &crop_img, std::vector &area, + std::vector ¢er, std::vector &scale, float expandratio) { int crop_x1 = std::max(0, area[0]); int crop_y1 = std::max(0, area[1]); int crop_x2 = std::min(img.cols - 1, area[2]); int crop_y2 = std::min(img.rows - 1, area[3]); - + int center_x = (crop_x1 + crop_x2) / 2.; int center_y = (crop_y1 + crop_y2) / 2.; int half_h = (crop_y2 - crop_y1) / 2.; @@ -182,4 +227,4 @@ void CropImg(cv::Mat& img, scale.emplace_back((crop_y2 - crop_y1)); } -} // namespace PPShiTu +} // namespace PPShiTu diff --git a/deploy/lite_shitu/src/utils.cc b/deploy/lite_shitu/src/utils.cc index 3bc461770e2d79e33e4de91a3f4cea8c131eb7ad..a687f071c15ebe97915cddf98950042ab9cf8b4d 100644 --- a/deploy/lite_shitu/src/utils.cc +++ b/deploy/lite_shitu/src/utils.cc @@ -54,4 +54,53 @@ void nms(std::vector &input_boxes, float nms_threshold, } } +float RectOverlap(const ObjectResult &a, const ObjectResult &b) { + float Aa = (a.rect[2] - a.rect[0] + 1) * (a.rect[3] - a.rect[1] + 1); + float Ab = (b.rect[2] - b.rect[0] + 1) * (b.rect[3] - b.rect[1] + 1); + + int iou_w = max(min(a.rect[2], b.rect[2]) - max(a.rect[0], b.rect[0]) + 1, 0); + int iou_h = max(min(a.rect[3], b.rect[3]) - max(a.rect[1], b.rect[1]) + 1, 0); + float Aab = iou_w * iou_h; + return Aab / (Aa + Ab - Aab); +} + +inline void +GetMaxScoreIndex(const std::vector &det_result, + const float threshold, + std::vector> &score_index_vec) { + // Generate index score pairs. + for (size_t i = 0; i < det_result.size(); ++i) { + if (det_result[i].confidence > threshold) { + score_index_vec.push_back(std::make_pair(det_result[i].confidence, i)); + } + } + + // Sort the score pair according to the scores in descending order + std::stable_sort(score_index_vec.begin(), score_index_vec.end(), + SortScorePairDescend); +} + +void NMSBoxes(const std::vector det_result, + const float score_threshold, const float nms_threshold, + std::vector &indices) { + int a = 1; + // Get top_k scores (with corresponding indices). 
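+  // (The loop below then performs greedy NMS: candidates are visited in
+  // descending confidence order, and a box is kept only if its IoU with every
+  // previously kept box, computed by RectOverlap, is at most nms_threshold.)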
+ std::vector> score_index_vec; + GetMaxScoreIndex(det_result, score_threshold, score_index_vec); + + // Do nms + indices.clear(); + for (size_t i = 0; i < score_index_vec.size(); ++i) { + const int idx = score_index_vec[i].second; + bool keep = true; + for (int k = 0; k < (int)indices.size() && keep; ++k) { + const int kept_idx = indices[k]; + float overlap = RectOverlap(det_result[idx], det_result[kept_idx]); + keep = overlap <= nms_threshold; + } + if (keep) + indices.push_back(idx); + } +} + } // namespace PPShiTu diff --git a/deploy/lite_shitu/src/vector_search.cc b/deploy/lite_shitu/src/vector_search.cc index ea848959b651eb04effc25ad9efb7eb497ef2025..f9c06a83d2abc0401d8e480a57244a43ba6fc7aa 100644 --- a/deploy/lite_shitu/src/vector_search.cc +++ b/deploy/lite_shitu/src/vector_search.cc @@ -64,4 +64,4 @@ const SearchResult &VectorSearch::Search(float *feature, int query_number) { const std::string &VectorSearch::GetLabel(faiss::Index::idx_t ind) { return this->id_map.at(ind); } -} \ No newline at end of file +} // namespace PPShiTu diff --git a/deploy/paddle2onnx/readme.md b/deploy/paddle2onnx/readme.md index d1307ea84e3d7a1465c7c464d3b41dfa7613a046..bacc202806bf1a60e85790969edcb70f1489f7df 100644 --- a/deploy/paddle2onnx/readme.md +++ b/deploy/paddle2onnx/readme.md @@ -1,53 +1,59 @@ # paddle2onnx 模型转化与预测 -本章节介绍 ResNet50_vd 模型如何转化为 ONNX 模型,并基于 ONNX 引擎预测。 +## 目录 + +- [paddle2onnx 模型转化与预测](#paddle2onnx-模型转化与预测) + - [1. 环境准备](#1-环境准备) + - [2. 模型转换](#2-模型转换) + - [3. onnx 预测](#3-onnx-预测) ## 1. 环境准备 需要准备 Paddle2ONNX 模型转化环境,和 ONNX 模型预测环境。 -Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式,算子目前稳定支持导出 ONNX Opset 9~11,部分Paddle算子支持更低的ONNX Opset转换。 -更多细节可参考 [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX/blob/develop/README_zh.md) +Paddle2ONNX 支持将 PaddlePaddle inference 模型格式转化到 ONNX 模型格式,算子目前稳定支持导出 ONNX Opset 9~11。 +更多细节可参考 [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX#paddle2onnx) - 安装 Paddle2ONNX -``` -python3.7 -m pip install paddle2onnx -``` + ```shell + python3.7 -m pip install paddle2onnx + ``` -- 安装 ONNX 运行时 -``` -python3.7 -m pip install onnxruntime -``` +- 安装 ONNX 推理引擎 + ```shell + python3.7 -m pip install onnxruntime + ``` +下面以 ResNet50_vd 为例,介绍如何将 PaddlePaddle inference 模型转换为 ONNX 模型,并基于 ONNX 引擎预测。 ## 2. 模型转换 - ResNet50_vd inference模型下载 -``` -cd deploy -mkdir models && cd models -wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar -cd .. -``` + ```shell + cd deploy + mkdir models && cd models + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar + cd .. + ``` - 模型转换 -使用 Paddle2ONNX 将 Paddle 静态图模型转换为 ONNX 模型格式: -``` -paddle2onnx --model_dir=./models/ResNet50_vd_infer/ \ ---model_filename=inference.pdmodel \ ---params_filename=inference.pdiparams \ ---save_file=./models/ResNet50_vd_infer/inference.onnx \ ---opset_version=10 \ ---enable_onnx_checker=True -``` + 使用 Paddle2ONNX 将 Paddle 静态图模型转换为 ONNX 模型格式: + ```shell + paddle2onnx --model_dir=./models/ResNet50_vd_infer/ \ + --model_filename=inference.pdmodel \ + --params_filename=inference.pdiparams \ + --save_file=./models/ResNet50_vd_infer/inference.onnx \ + --opset_version=10 \ + --enable_onnx_checker=True + ``` -执行完毕后,ONNX 模型 `inference.onnx` 会被保存在 `./models/ResNet50_vd_infer/` 路径下 +转换完毕后,生成的ONNX 模型 `inference.onnx` 会被保存在 `./models/ResNet50_vd_infer/` 路径下 ## 3. 
onnx 预测
执行如下命令:
-```
+```shell
python3.7 python/predict_cls.py \
-c configs/inference_cls.yaml \
-o Global.use_onnx=True \
diff --git a/deploy/paddle2onnx/readme_en.md b/deploy/paddle2onnx/readme_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..6df13e5fe31805d642432dea8526661e82b6e95b
--- /dev/null
+++ b/deploy/paddle2onnx/readme_en.md
@@ -0,0 +1,59 @@
+# Paddle2ONNX: Converting To ONNX and Deployment
+
+This section introduces how to convert the ResNet50_vd Paddle inference model to the ONNX format and run predictions with the ONNX engine.
+
+## 1. Installation
+
+First, you need to install Paddle2ONNX and onnxruntime. Paddle2ONNX is a toolkit that converts Paddle inference models to the ONNX format. Please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX/blob/develop/README_en.md) for more information.
+
+- Paddle2ONNX Installation
+```
+python3.7 -m pip install paddle2onnx
+```
+
+- onnxruntime Installation
+```
+python3.7 -m pip install onnxruntime
+```
+
+## 2. Converting to ONNX
+
+Download the ResNet50_vd Paddle inference model:
+
+```
+cd deploy
+mkdir models && cd models
+wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
+cd ..
+```
+
+Convert it to an ONNX model:
+
+```
+paddle2onnx --model_dir=./models/ResNet50_vd_infer/ \
+--model_filename=inference.pdmodel \
+--params_filename=inference.pdiparams \
+--save_file=./models/ResNet50_vd_infer/inference.onnx \
+--opset_version=10 \
+--enable_onnx_checker=True
+```
+
+After running the above command, the converted ONNX model `inference.onnx` is saved in `./models/ResNet50_vd_infer/`.
+
+## 3. Deployment
+
+Run prediction with the ONNX model using the command below:
+
+```
+python3.7 python/predict_cls.py \
+-c configs/inference_cls.yaml \
+-o Global.use_onnx=True \
+-o Global.use_gpu=False \
+-o Global.inference_model_dir=./models/ResNet50_vd_infer
+```
+
+The prediction results:
+
+```
+ILSVRC2012_val_00000010.jpeg: class id(s): [153, 204, 229, 332, 155], score(s): [0.69, 0.10, 0.02, 0.01, 0.01], label_name(s): ['Maltese dog, Maltese terrier, Maltese', 'Lhasa, Lhasa apso', 'Old English sheepdog, bobtail', 'Angora, Angora rabbit', 'Shih-Tzu']
+```
diff --git a/deploy/paddleserving/build_server.sh b/deploy/paddleserving/build_server.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1329a3684ff72862858ee25c0a938bd61ff654ae
--- /dev/null
+++ b/deploy/paddleserving/build_server.sh
@@ -0,0 +1,88 @@
+# 使用镜像:
+# registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82
+
+# 编译Serving Server:
+
+# client和app可以直接使用release版本
+
+# server因为加入了自定义OP,需要重新编译
+
+# 默认编译时的${PWD}=PaddleClas/deploy/paddleserving/
+
+python_name=${1:-'python'}
+
+apt-get update
+apt install -y libcurl4-openssl-dev libbz2-dev
+wget -nc https://paddle-serving.bj.bcebos.com/others/centos_ssl.tar
+tar xf centos_ssl.tar
+rm -rf centos_ssl.tar
+mv libcrypto.so.1.0.2k /usr/lib/libcrypto.so.1.0.2k
+mv libssl.so.1.0.2k /usr/lib/libssl.so.1.0.2k
+ln -sf /usr/lib/libcrypto.so.1.0.2k /usr/lib/libcrypto.so.10
+ln -sf /usr/lib/libssl.so.1.0.2k /usr/lib/libssl.so.10
+ln -sf /usr/lib/libcrypto.so.10 /usr/lib/libcrypto.so
+ln -sf /usr/lib/libssl.so.10 /usr/lib/libssl.so
+
+# 安装go依赖
+rm -rf /usr/local/go
+wget -qO- https://paddle-ci.cdn.bcebos.com/go1.17.2.linux-amd64.tar.gz | tar -xz -C /usr/local
+export GOROOT=/usr/local/go
+export GOPATH=/root/gopath
+export PATH=$PATH:$GOPATH/bin:$GOROOT/bin
+go env -w GO111MODULE=on
+go env -w 
GOPROXY=https://goproxy.cn,direct +go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.15.2 +go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.15.2 +go install github.com/golang/protobuf/protoc-gen-go@v1.4.3 +go install google.golang.org/grpc@v1.33.0 +go env -w GO111MODULE=auto + +# 下载opencv库 +wget https://paddle-qa.bj.bcebos.com/PaddleServing/opencv3.tar.gz +tar -xvf opencv3.tar.gz +rm -rf opencv3.tar.gz +export OPENCV_DIR=$PWD/opencv3 + +# clone Serving +git clone https://github.com/PaddlePaddle/Serving.git -b develop --depth=1 + +cd Serving # PaddleClas/deploy/paddleserving/Serving +export Serving_repo_path=$PWD +git submodule update --init --recursive +${python_name} -m pip install -r python/requirements.txt + +# set env +export PYTHON_INCLUDE_DIR=$(${python_name} -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") +export PYTHON_LIBRARIES=$(${python_name} -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR'))") +export PYTHON_EXECUTABLE=`which ${python_name}` + +export CUDA_PATH='/usr/local/cuda' +export CUDNN_LIBRARY='/usr/local/cuda/lib64/' +export CUDA_CUDART_LIBRARY='/usr/local/cuda/lib64/' +export TENSORRT_LIBRARY_PATH='/usr/local/TensorRT6-cuda10.1-cudnn7/targets/x86_64-linux-gnu/' + +# cp 自定义OP代码 +\cp ../preprocess/general_clas_op.* ${Serving_repo_path}/core/general-server/op +\cp ../preprocess/preprocess_op.* ${Serving_repo_path}/core/predictor/tools/pp_shitu_tools + +# 编译Server +mkdir server-build-gpu-opencv +cd server-build-gpu-opencv +cmake -DPYTHON_INCLUDE_DIR=$PYTHON_INCLUDE_DIR \ +-DPYTHON_LIBRARIES=$PYTHON_LIBRARIES \ +-DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \ +-DCUDA_TOOLKIT_ROOT_DIR=${CUDA_PATH} \ +-DCUDNN_LIBRARY=${CUDNN_LIBRARY} \ +-DCUDA_CUDART_LIBRARY=${CUDA_CUDART_LIBRARY} \ +-DTENSORRT_ROOT=${TENSORRT_LIBRARY_PATH} \ +-DOPENCV_DIR=${OPENCV_DIR} \ +-DWITH_OPENCV=ON \ +-DSERVER=ON \ +-DWITH_GPU=ON .. +make -j32 + +${python_name} -m pip install python/dist/paddle* + +# export SERVING_BIN +export SERVING_BIN=$PWD/core/general-server/serving +cd ../../ \ No newline at end of file diff --git a/deploy/paddleserving/config.yml b/deploy/paddleserving/config.yml index d9f464dd093d5a3d0ac34a61f4af17e3792fcd86..92d8297f9f23a4082cb0a499ca4c172e71d79caf 100644 --- a/deploy/paddleserving/config.yml +++ b/deploy/paddleserving/config.yml @@ -30,4 +30,4 @@ op: client_type: local_predictor #Fetch结果列表,以client_config中fetch_var的alias_name为准 - fetch_list: ["prediction"] + fetch_list: ["prediction"] diff --git a/deploy/paddleserving/preprocess/general_clas_op.cpp b/deploy/paddleserving/preprocess/general_clas_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e0ab48fa52da70a558b34e7ab1deda52675e99bc --- /dev/null +++ b/deploy/paddleserving/preprocess/general_clas_op.cpp @@ -0,0 +1,206 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
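+// GeneralClasOp is a custom Paddle Serving OP for classification: it decodes a
+// base64-encoded image from the request, applies the preprocessing chain
+// (BGR->RGB conversion, resize, center crop, normalize, HWC->CHW permute) and
+// feeds the resulting tensor to the inference engine.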
+ +#include "core/general-server/op/general_clas_op.h" +#include "core/predictor/framework/infer.h" +#include "core/predictor/framework/memory.h" +#include "core/predictor/framework/resource.h" +#include "core/util/include/timer.h" +#include +#include +#include +#include + +namespace baidu { +namespace paddle_serving { +namespace serving { + +using baidu::paddle_serving::Timer; +using baidu::paddle_serving::predictor::MempoolWrapper; +using baidu::paddle_serving::predictor::general_model::Tensor; +using baidu::paddle_serving::predictor::general_model::Response; +using baidu::paddle_serving::predictor::general_model::Request; +using baidu::paddle_serving::predictor::InferManager; +using baidu::paddle_serving::predictor::PaddleGeneralModelConfig; + +int GeneralClasOp::inference() { + VLOG(2) << "Going to run inference"; + const std::vector pre_node_names = pre_names(); + if (pre_node_names.size() != 1) { + LOG(ERROR) << "This op(" << op_name() + << ") can only have one predecessor op, but received " + << pre_node_names.size(); + return -1; + } + const std::string pre_name = pre_node_names[0]; + + const GeneralBlob *input_blob = get_depend_argument(pre_name); + if (!input_blob) { + LOG(ERROR) << "input_blob is nullptr,error"; + return -1; + } + uint64_t log_id = input_blob->GetLogId(); + VLOG(2) << "(logid=" << log_id << ") Get precedent op name: " << pre_name; + + GeneralBlob *output_blob = mutable_data(); + if (!output_blob) { + LOG(ERROR) << "output_blob is nullptr,error"; + return -1; + } + output_blob->SetLogId(log_id); + + if (!input_blob) { + LOG(ERROR) << "(logid=" << log_id + << ") Failed mutable depended argument, op:" << pre_name; + return -1; + } + + const TensorVector *in = &input_blob->tensor_vector; + TensorVector *out = &output_blob->tensor_vector; + + int batch_size = input_blob->_batch_size; + output_blob->_batch_size = batch_size; + VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size; + + Timer timeline; + int64_t start = timeline.TimeStampUS(); + timeline.Start(); + + // only support string type + + char *total_input_ptr = static_cast(in->at(0).data.data()); + std::string base64str = total_input_ptr; + + cv::Mat img = Base2Mat(base64str); + + // RGB2BGR + cv::cvtColor(img, img, cv::COLOR_BGR2RGB); + + // Resize + cv::Mat resize_img; + resize_op_.Run(img, resize_img, resize_short_size_); + + // CenterCrop + crop_op_.Run(resize_img, crop_size_); + + // Normalize + normalize_op_.Run(&resize_img, mean_, scale_, is_scale_); + + // Permute + std::vector input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f); + permute_op_.Run(&resize_img, input.data()); + float maxValue = *max_element(input.begin(), input.end()); + float minValue = *min_element(input.begin(), input.end()); + + TensorVector *real_in = new TensorVector(); + if (!real_in) { + LOG(ERROR) << "real_in is nullptr,error"; + return -1; + } + + std::vector input_shape; + int in_num = 0; + void *databuf_data = NULL; + char *databuf_char = NULL; + size_t databuf_size = 0; + + input_shape = {1, 3, resize_img.rows, resize_img.cols}; + in_num = std::accumulate(input_shape.begin(), input_shape.end(), 1, + std::multiplies()); + + databuf_size = in_num * sizeof(float); + databuf_data = MempoolWrapper::instance().malloc(databuf_size); + if (!databuf_data) { + LOG(ERROR) << "Malloc failed, size: " << databuf_size; + return -1; + } + + memcpy(databuf_data, input.data(), databuf_size); + databuf_char = reinterpret_cast(databuf_data); + paddle::PaddleBuf paddleBuf(databuf_char, databuf_size); + paddle::PaddleTensor 
tensor_in; + tensor_in.name = in->at(0).name; + tensor_in.dtype = paddle::PaddleDType::FLOAT32; + tensor_in.shape = {1, 3, resize_img.rows, resize_img.cols}; + tensor_in.lod = in->at(0).lod; + tensor_in.data = paddleBuf; + real_in->push_back(tensor_in); + + if (InferManager::instance().infer(engine_name().c_str(), real_in, out, + batch_size)) { + LOG(ERROR) << "(logid=" << log_id + << ") Failed do infer in fluid model: " << engine_name().c_str(); + return -1; + } + + int64_t end = timeline.TimeStampUS(); + CopyBlobInfo(input_blob, output_blob); + AddBlobInfo(output_blob, start); + AddBlobInfo(output_blob, end); + return 0; +} + +cv::Mat GeneralClasOp::Base2Mat(std::string &base64_data) { + cv::Mat img; + std::string s_mat; + s_mat = base64Decode(base64_data.data(), base64_data.size()); + std::vector base64_img(s_mat.begin(), s_mat.end()); + img = cv::imdecode(base64_img, cv::IMREAD_COLOR); // CV_LOAD_IMAGE_COLOR + return img; +} + +std::string GeneralClasOp::base64Decode(const char *Data, int DataByte) { + const char DecodeTable[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 62, // '+' + 0, 0, 0, + 63, // '/' + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // '0'-'9' + 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // 'A'-'Z' + 0, 0, 0, 0, 0, 0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // 'a'-'z' + }; + + std::string strDecode; + int nValue; + int i = 0; + while (i < DataByte) { + if (*Data != '\r' && *Data != '\n') { + nValue = DecodeTable[*Data++] << 18; + nValue += DecodeTable[*Data++] << 12; + strDecode += (nValue & 0x00FF0000) >> 16; + if (*Data != '=') { + nValue += DecodeTable[*Data++] << 6; + strDecode += (nValue & 0x0000FF00) >> 8; + if (*Data != '=') { + nValue += DecodeTable[*Data++]; + strDecode += nValue & 0x000000FF; + } + } + i += 4; + } else // 回车换行,跳过 + { + Data++; + i++; + } + } + return strDecode; +} +DEFINE_OP(GeneralClasOp); +} // namespace serving +} // namespace paddle_serving +} // namespace baidu diff --git a/deploy/paddleserving/preprocess/general_clas_op.h b/deploy/paddleserving/preprocess/general_clas_op.h new file mode 100644 index 0000000000000000000000000000000000000000..69b7a8e005872d7b66b9a61265ca5798b4ac8bab --- /dev/null +++ b/deploy/paddleserving/preprocess/general_clas_op.h @@ -0,0 +1,70 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
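+// Declaration of GeneralClasOp. The preprocessing defaults below (ImageNet
+// mean/std, resize_short_size_ = 256, crop_size_ = 224) implement the standard
+// ResNet-style evaluation preprocessing used by the .cpp implementation.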
+ +#pragma once +#include "core/general-server/general_model_service.pb.h" +#include "core/general-server/op/general_infer_helper.h" +#include "core/predictor/tools/pp_shitu_tools/preprocess_op.h" +#include "paddle_inference_api.h" // NOLINT +#include +#include + +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace baidu { +namespace paddle_serving { +namespace serving { + +class GeneralClasOp + : public baidu::paddle_serving::predictor::OpWithChannel { +public: + typedef std::vector TensorVector; + + DECLARE_OP(GeneralClasOp); + + int inference(); + +private: + // clas preprocess + std::vector mean_ = {0.485f, 0.456f, 0.406f}; + std::vector scale_ = {0.229f, 0.224f, 0.225f}; + bool is_scale_ = true; + + int resize_short_size_ = 256; + int crop_size_ = 224; + + PaddleClas::ResizeImg resize_op_; + PaddleClas::Normalize normalize_op_; + PaddleClas::Permute permute_op_; + PaddleClas::CenterCropImg crop_op_; + + // read pics + cv::Mat Base2Mat(std::string &base64_data); + std::string base64Decode(const char *Data, int DataByte); +}; + +} // namespace serving +} // namespace paddle_serving +} // namespace baidu diff --git a/deploy/paddleserving/preprocess/preprocess_op.cpp b/deploy/paddleserving/preprocess/preprocess_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9c79342ceda115fe3c213bb6f5d32c6e56f2380a --- /dev/null +++ b/deploy/paddleserving/preprocess/preprocess_op.cpp @@ -0,0 +1,149 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
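+// Preprocessing operators shared by the Serving custom OPs: the Feature
+// namespace serves PP-ShiTu feature extraction, while the PaddleClas namespace
+// serves classification. Both provide Resize, CenterCrop, Normalize and an
+// HWC->CHW Permute.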
+ +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include "paddle_api.h" +#include "paddle_inference_api.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "preprocess_op.h" + +namespace Feature { + +void Permute::Run(const cv::Mat *im, float *data) { + int rh = im->rows; + int rw = im->cols; + int rc = im->channels(); + for (int i = 0; i < rc; ++i) { + cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i); + } +} + +void Normalize::Run(cv::Mat *im, const std::vector &mean, + const std::vector &std, float scale) { + (*im).convertTo(*im, CV_32FC3, scale); + for (int h = 0; h < im->rows; h++) { + for (int w = 0; w < im->cols; w++) { + im->at(h, w)[0] = + (im->at(h, w)[0] - mean[0]) / std[0]; + im->at(h, w)[1] = + (im->at(h, w)[1] - mean[1]) / std[1]; + im->at(h, w)[2] = + (im->at(h, w)[2] - mean[2]) / std[2]; + } + } +} + +void CenterCropImg::Run(cv::Mat &img, const int crop_size) { + int resize_w = img.cols; + int resize_h = img.rows; + int w_start = int((resize_w - crop_size) / 2); + int h_start = int((resize_h - crop_size) / 2); + cv::Rect rect(w_start, h_start, crop_size, crop_size); + img = img(rect); +} + +void ResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, + int resize_short_size, int size) { + int resize_h = 0; + int resize_w = 0; + if (size > 0) { + resize_h = size; + resize_w = size; + } else { + int w = img.cols; + int h = img.rows; + + float ratio = 1.f; + if (h < w) { + ratio = float(resize_short_size) / float(h); + } else { + ratio = float(resize_short_size) / float(w); + } + resize_h = round(float(h) * ratio); + resize_w = round(float(w) * ratio); + } + cv::resize(img, resize_img, cv::Size(resize_w, resize_h)); +} + +} // namespace Feature + +namespace PaddleClas { +void Permute::Run(const cv::Mat *im, float *data) { + int rh = im->rows; + int rw = im->cols; + int rc = im->channels(); + for (int i = 0; i < rc; ++i) { + cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i); + } +} + +void Normalize::Run(cv::Mat *im, const std::vector &mean, + const std::vector &scale, const bool is_scale) { + double e = 1.0; + if (is_scale) { + e /= 255.0; + } + (*im).convertTo(*im, CV_32FC3, e); + for (int h = 0; h < im->rows; h++) { + for (int w = 0; w < im->cols; w++) { + im->at(h, w)[0] = + (im->at(h, w)[0] - mean[0]) / scale[0]; + im->at(h, w)[1] = + (im->at(h, w)[1] - mean[1]) / scale[1]; + im->at(h, w)[2] = + (im->at(h, w)[2] - mean[2]) / scale[2]; + } + } +} + +void CenterCropImg::Run(cv::Mat &img, const int crop_size) { + int resize_w = img.cols; + int resize_h = img.rows; + int w_start = int((resize_w - crop_size) / 2); + int h_start = int((resize_h - crop_size) / 2); + cv::Rect rect(w_start, h_start, crop_size, crop_size); + img = img(rect); +} + +void ResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, + int resize_short_size) { + int w = img.cols; + int h = img.rows; + + float ratio = 1.f; + if (h < w) { + ratio = float(resize_short_size) / float(h); + } else { + ratio = float(resize_short_size) / float(w); + } + + int resize_h = round(float(h) * ratio); + int resize_w = round(float(w) * ratio); + + cv::resize(img, resize_img, cv::Size(resize_w, resize_h)); +} + +} // namespace PaddleClas diff --git a/deploy/paddleserving/preprocess/preprocess_op.h b/deploy/paddleserving/preprocess/preprocess_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0ea9d2e14a525365bb049a13358660a2567dadc8 --- /dev/null +++ 
b/deploy/paddleserving/preprocess/preprocess_op.h @@ -0,0 +1,81 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "opencv2/core.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/imgproc.hpp" +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace Feature { + +class Normalize { +public: + virtual void Run(cv::Mat *im, const std::vector &mean, + const std::vector &std, float scale); +}; + +// RGB -> CHW +class Permute { +public: + virtual void Run(const cv::Mat *im, float *data); +}; + +class CenterCropImg { +public: + virtual void Run(cv::Mat &im, const int crop_size = 224); +}; + +class ResizeImg { +public: + virtual void Run(const cv::Mat &img, cv::Mat &resize_img, int max_size_len, + int size = 0); +}; + +} // namespace Feature + +namespace PaddleClas { + +class Normalize { +public: + virtual void Run(cv::Mat *im, const std::vector &mean, + const std::vector &scale, const bool is_scale = true); +}; + +// RGB -> CHW +class Permute { +public: + virtual void Run(const cv::Mat *im, float *data); +}; + +class CenterCropImg { +public: + virtual void Run(cv::Mat &im, const int crop_size = 224); +}; + +class ResizeImg { +public: + virtual void Run(const cv::Mat &img, cv::Mat &resize_img, int max_size_len); +}; + +} // namespace PaddleClas diff --git a/deploy/paddleserving/readme.md b/deploy/paddleserving/readme.md index a2fdec2de5ac3f468ff7ed63b04ebf7bb7b2f574..321d6e0f68e11b3eb9a47ad45076b8a2e3aa771a 120000 --- a/deploy/paddleserving/readme.md +++ b/deploy/paddleserving/readme.md @@ -1 +1 @@ -../../docs/zh_CN/inference_deployment/paddle_serving_deploy.md \ No newline at end of file +../../docs/zh_CN/inference_deployment/classification_serving_deploy.md \ No newline at end of file diff --git a/deploy/paddleserving/readme_en.md b/deploy/paddleserving/readme_en.md new file mode 120000 index 0000000000000000000000000000000000000000..80b5fede2d9809db34b1f28a1141262865e042e0 --- /dev/null +++ b/deploy/paddleserving/readme_en.md @@ -0,0 +1 @@ +../../docs/en/inference_deployment/classification_serving_deploy_en.md \ No newline at end of file diff --git a/deploy/paddleserving/recognition/config.yml b/deploy/paddleserving/recognition/config.yml index 6ecc32e22435f07a549ffcdeb6a435b33c4901f1..e4108006e6f2ea1a3698e4fdf9c32f25dcbfbeb0 100644 --- a/deploy/paddleserving/recognition/config.yml +++ b/deploy/paddleserving/recognition/config.yml @@ -31,7 +31,7 @@ op: #Fetch结果列表,以client_config中fetch_var的alias_name为准 fetch_list: ["features"] - + det: concurrency: 1 local_service_conf: diff --git a/deploy/paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt b/deploy/paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..c781eb6f449fe06afbba7f96e01798c974bccf54 --- /dev/null +++ 
b/deploy/paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt @@ -0,0 +1,32 @@ +feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +feed_var { + name: "boxes" + alias_name: "boxes" + is_lod_tensor: false + feed_type: 1 + shape: 6 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: false + fetch_type: 1 + shape: 512 +} +fetch_var { + name: "boxes" + alias_name: "boxes" + is_lod_tensor: false + fetch_type: 1 + shape: 6 +} + + diff --git a/deploy/paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt b/deploy/paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..04812f42ed90fbbd47c73b9ec706d57c04b4c571 --- /dev/null +++ b/deploy/paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/serving_server_conf.prototxt @@ -0,0 +1,30 @@ +feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +feed_var { + name: "boxes" + alias_name: "boxes" + is_lod_tensor: false + feed_type: 1 + shape: 6 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: false + fetch_type: 1 + shape: 512 +} +fetch_var { + name: "boxes" + alias_name: "boxes" + is_lod_tensor: false + fetch_type: 1 + shape: 6 +} diff --git a/deploy/paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt b/deploy/paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..d9ab81a8b3c275f638f314489a84deef46011d73 --- /dev/null +++ b/deploy/paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt @@ -0,0 +1,29 @@ +feed_var { + name: "im_shape" + alias_name: "im_shape" + is_lod_tensor: false + feed_type: 1 + shape: 2 +} +feed_var { + name: "image" + alias_name: "image" + is_lod_tensor: false + feed_type: 7 + shape: -1 + shape: -1 + shape: 3 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "save_infer_model/scale_0.tmp_1" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +fetch_var { + name: "save_infer_model/scale_1.tmp_1" + alias_name: "save_infer_model/scale_1.tmp_1" + is_lod_tensor: false + fetch_type: 2 +} diff --git a/deploy/paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/serving_server_conf.prototxt b/deploy/paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/serving_server_conf.prototxt new file mode 100644 index 0000000000000000000000000000000000000000..d9ab81a8b3c275f638f314489a84deef46011d73 --- /dev/null +++ b/deploy/paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/serving_server_conf.prototxt @@ -0,0 +1,29 @@ +feed_var { + name: "im_shape" + alias_name: "im_shape" + is_lod_tensor: false + feed_type: 1 + shape: 2 +} +feed_var { + name: "image" + alias_name: "image" + is_lod_tensor: false + feed_type: 7 + shape: -1 + shape: -1 + shape: 3 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "save_infer_model/scale_0.tmp_1" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +fetch_var { + name: 
"save_infer_model/scale_1.tmp_1" + alias_name: "save_infer_model/scale_1.tmp_1" + is_lod_tensor: false + fetch_type: 2 +} diff --git a/deploy/paddleserving/recognition/readme.md b/deploy/paddleserving/recognition/readme.md new file mode 120000 index 0000000000000000000000000000000000000000..116873ea2d00c750a36c3ebe2a727b34ccb11e4c --- /dev/null +++ b/deploy/paddleserving/recognition/readme.md @@ -0,0 +1 @@ +../../../docs/zh_CN/inference_deployment/recognition_serving_deploy.md \ No newline at end of file diff --git a/deploy/paddleserving/recognition/readme_en.md b/deploy/paddleserving/recognition/readme_en.md new file mode 120000 index 0000000000000000000000000000000000000000..2250088e12a4f6a3e9b41889f1fc9d00a983dfe7 --- /dev/null +++ b/deploy/paddleserving/recognition/readme_en.md @@ -0,0 +1 @@ +../../../docs/en/inference_deployment/recognition_serving_deploy_en.md \ No newline at end of file diff --git a/deploy/paddleserving/recognition/run_cpp_serving.sh b/deploy/paddleserving/recognition/run_cpp_serving.sh index affca99c63da9c70fd7c5dd4eb6079fe8ba6b7e6..e1deb1148b1705031c0e92522e7eaf7cf4679a45 100644 --- a/deploy/paddleserving/recognition/run_cpp_serving.sh +++ b/deploy/paddleserving/recognition/run_cpp_serving.sh @@ -1,7 +1,14 @@ -nohup python3 -m paddle_serving_server.serve \ ---model ../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving \ - --port 9293 >>log_mainbody_detection.txt 1&>2 & +gpu_id=$1 -nohup python3 -m paddle_serving_server.serve \ ---model ../../models/general_PPLCNet_x2_5_lite_v1.0_serving \ ---port 9294 >>log_feature_extraction.txt 1&>2 & +# PP-ShiTu CPP serving script +if [[ -n "${gpu_id}" ]]; then + nohup python3.7 -m paddle_serving_server.serve \ + --model ../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving ../../models/general_PPLCNet_x2_5_lite_v1.0_serving \ + --op GeneralPicodetOp GeneralFeatureExtractOp \ + --port 9400 --gpu_id="${gpu_id}" > log_PPShiTu.txt 2>&1 & +else + nohup python3.7 -m paddle_serving_server.serve \ + --model ../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving ../../models/general_PPLCNet_x2_5_lite_v1.0_serving \ + --op GeneralPicodetOp GeneralFeatureExtractOp \ + --port 9400 > log_PPShiTu.txt 2>&1 & +fi diff --git a/deploy/paddleserving/recognition/test_cpp_serving_client.py b/deploy/paddleserving/recognition/test_cpp_serving_client.py index a2bf1ae3e9d0a69628319b9f845a1e6f7701b391..e2cd17e855ebfe8fb286ebaeff8ab63874e2e972 100644 --- a/deploy/paddleserving/recognition/test_cpp_serving_client.py +++ b/deploy/paddleserving/recognition/test_cpp_serving_client.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import numpy as np from paddle_serving_client import Client @@ -22,181 +21,101 @@ import faiss import os import pickle - -class MainbodyDetect(): - """ - pp-shitu mainbody detect. 
- include preprocess, process, postprocess - return detect results - Attention: Postprocess include num limit and box filter; no nms - """ - - def __init__(self): - self.preprocess = DetectionSequential([ - DetectionFile2Image(), DetectionNormalize( - [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True), - DetectionResize( - (640, 640), False, interpolation=2), DetectionTranspose( - (2, 0, 1)) - ]) - - self.client = Client() - self.client.load_client_config( - "../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt" - ) - self.client.connect(['127.0.0.1:9293']) - - self.max_det_result = 5 - self.conf_threshold = 0.2 - - def predict(self, imgpath): - im, im_info = self.preprocess(imgpath) - im_shape = np.array(im.shape[1:]).reshape(-1) - scale_factor = np.array(list(im_info['scale_factor'])).reshape(-1) - - fetch_map = self.client.predict( - feed={ - "image": im, - "im_shape": im_shape, - "scale_factor": scale_factor, - }, - fetch=["save_infer_model/scale_0.tmp_1"], - batch=False) - return self.postprocess(fetch_map, imgpath) - - def postprocess(self, fetch_map, imgpath): - #1. get top max_det_result - det_results = fetch_map["save_infer_model/scale_0.tmp_1"] - if len(det_results) > self.max_det_result: - boxes_reserved = fetch_map[ - "save_infer_model/scale_0.tmp_1"][:self.max_det_result] - else: - boxes_reserved = det_results - - #2. do conf threshold - boxes_list = [] - for i in range(boxes_reserved.shape[0]): - if (boxes_reserved[i, 1]) > self.conf_threshold: - boxes_list.append(boxes_reserved[i, :]) - - #3. add origin image box - origin_img = cv2.imread(imgpath) - boxes_list.append( - np.array([0, 1.0, 0, 0, origin_img.shape[1], origin_img.shape[0]])) - return np.array(boxes_list) - - -class ObjectRecognition(): - """ - pp-shitu object recognion for all objects detected by MainbodyDetect. - include preprocess, process, postprocess - preprocess include preprocess for each image and batching. - Batch process - postprocess include retrieval and nms - """ - - def __init__(self): - self.client = Client() - self.client.load_client_config( - "../../models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt" - ) - self.client.connect(["127.0.0.1:9294"]) - - self.seq = Sequential([ - BGR2RGB(), Resize((224, 224)), Div(255), - Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], - False), Transpose((2, 0, 1)) - ]) - - self.searcher, self.id_map = self.init_index() - - self.rec_nms_thresold = 0.05 - self.rec_score_thres = 0.5 - self.feature_normalize = True - self.return_k = 1 - - def init_index(self): - index_dir = "../../drink_dataset_v1.0/index" - assert os.path.exists(os.path.join( - index_dir, "vector.index")), "vector.index not found ..." - assert os.path.exists(os.path.join( - index_dir, "id_map.pkl")), "id_map.pkl not found ... " - - searcher = faiss.read_index(os.path.join(index_dir, "vector.index")) - - with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd: - id_map = pickle.load(fd) - return searcher, id_map - - def predict(self, det_boxes, imgpath): - #1. preprocess - batch_imgs = [] - origin_img = cv2.imread(imgpath) - for i in range(det_boxes.shape[0]): - box = det_boxes[i] - x1, y1, x2, y2 = [int(x) for x in box[2:]] - cropped_img = origin_img[y1:y2, x1:x2, :].copy() - tmp = self.seq(cropped_img) - batch_imgs.append(tmp) - batch_imgs = np.array(batch_imgs) - - #2. process - fetch_map = self.client.predict( - feed={"x": batch_imgs}, fetch=["features"], batch=True) - batch_features = fetch_map["features"] - - #3. 
postprocess - if self.feature_normalize: - feas_norm = np.sqrt( - np.sum(np.square(batch_features), axis=1, keepdims=True)) - batch_features = np.divide(batch_features, feas_norm) - scores, docs = self.searcher.search(batch_features, self.return_k) - - results = [] - for i in range(scores.shape[0]): - pred = {} - if scores[i][0] >= self.rec_score_thres: - pred["bbox"] = [int(x) for x in det_boxes[i, 2:]] - pred["rec_docs"] = self.id_map[docs[i][0]].split()[1] - pred["rec_scores"] = scores[i][0] - results.append(pred) - return self.nms_to_rec_results(results) - - def nms_to_rec_results(self, results): - filtered_results = [] - x1 = np.array([r["bbox"][0] for r in results]).astype("float32") - y1 = np.array([r["bbox"][1] for r in results]).astype("float32") - x2 = np.array([r["bbox"][2] for r in results]).astype("float32") - y2 = np.array([r["bbox"][3] for r in results]).astype("float32") - scores = np.array([r["rec_scores"] for r in results]) - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = scores.argsort()[::-1] - while order.size > 0: - i = order[0] - xx1 = np.maximum(x1[i], x1[order[1:]]) - yy1 = np.maximum(y1[i], y1[order[1:]]) - xx2 = np.minimum(x2[i], x2[order[1:]]) - yy2 = np.minimum(y2[i], y2[order[1:]]) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - inter = w * h - ovr = inter / (areas[i] + areas[order[1:]] - inter) - inds = np.where(ovr <= self.rec_nms_thresold)[0] - order = order[inds + 1] - filtered_results.append(results[i]) - return filtered_results - - +rec_nms_thresold = 0.05 +rec_score_thres = 0.5 +feature_normalize = True +return_k = 1 +index_dir = "../../drink_dataset_v1.0/index" + + +def init_index(index_dir): + assert os.path.exists(os.path.join( + index_dir, "vector.index")), "vector.index not found ..." + assert os.path.exists(os.path.join( + index_dir, "id_map.pkl")), "id_map.pkl not found ... 
" + + searcher = faiss.read_index(os.path.join(index_dir, "vector.index")) + + with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd: + id_map = pickle.load(fd) + return searcher, id_map + + +#get box +def nms_to_rec_results(results, thresh=0.1): + filtered_results = [] + + x1 = np.array([r["bbox"][0] for r in results]).astype("float32") + y1 = np.array([r["bbox"][1] for r in results]).astype("float32") + x2 = np.array([r["bbox"][2] for r in results]).astype("float32") + y2 = np.array([r["bbox"][3] for r in results]).astype("float32") + scores = np.array([r["rec_scores"] for r in results]) + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + while order.size > 0: + i = order[0] + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + inds = np.where(ovr <= thresh)[0] + order = order[inds + 1] + filtered_results.append(results[i]) + return filtered_results + + +def postprocess(fetch_dict, feature_normalize, det_boxes, searcher, id_map, + return_k, rec_score_thres, rec_nms_thresold): + batch_features = fetch_dict["features"] + + #do feature norm + if feature_normalize: + feas_norm = np.sqrt( + np.sum(np.square(batch_features), axis=1, keepdims=True)) + batch_features = np.divide(batch_features, feas_norm) + + scores, docs = searcher.search(batch_features, return_k) + + results = [] + for i in range(scores.shape[0]): + pred = {} + if scores[i][0] >= rec_score_thres: + pred["bbox"] = [int(x) for x in det_boxes[i, 2:]] + pred["rec_docs"] = id_map[docs[i][0]].split()[1] + pred["rec_scores"] = scores[i][0] + results.append(pred) + + #do nms + results = nms_to_rec_results(results, rec_nms_thresold) + return results + + +#do client if __name__ == "__main__": - det = MainbodyDetect() - rec = ObjectRecognition() - - #1. get det_results - imgpath = "../../drink_dataset_v1.0/test_images/001.jpeg" - det_results = det.predict(imgpath) - - #2. 
get rec_results
-    rec_results = rec.predict(det_results, imgpath)
-    print(rec_results)
+    client = Client()
+    client.load_client_config([
+        "../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client",
+        "../../models/general_PPLCNet_x2_5_lite_v1.0_client"
+    ])
+    client.connect(['127.0.0.1:9400'])
+
+    im = cv2.imread("../../drink_dataset_v1.0/test_images/001.jpeg")
+    im_shape = np.array(im.shape[:2]).reshape(-1)
+    fetch_map = client.predict(
+        feed={"image": im,
+              "im_shape": im_shape},
+        fetch=["features", "boxes"],
+        batch=False)
+
+    #add retrieval procedure
+    det_boxes = fetch_map["boxes"]
+    searcher, id_map = init_index(index_dir)
+    results = postprocess(fetch_map, feature_normalize, det_boxes, searcher,
+                          id_map, return_k, rec_score_thres, rec_nms_thresold)
+    print(results)
diff --git a/deploy/paddleserving/run_cpp_serving.sh b/deploy/paddleserving/run_cpp_serving.sh
index 05794b7d953e578880dcc9cb87e91be0c031a415..4aecab368663968ad285372be05371b6a6c0138c 100644
--- a/deploy/paddleserving/run_cpp_serving.sh
+++ b/deploy/paddleserving/run_cpp_serving.sh
@@ -1,2 +1,14 @@
-#run cls server:
-nohup python3 -m paddle_serving_server.serve --model ResNet50_vd_serving --port 9292 &
+gpu_id=$1
+
+# ResNet50_vd CPP serving script
+if [[ -n "${gpu_id}" ]]; then
+    nohup python3.7 -m paddle_serving_server.serve \
+    --model ./ResNet50_vd_serving \
+    --op GeneralClasOp \
+    --port 9292 --gpu_id="${gpu_id}" &
+else
+    nohup python3.7 -m paddle_serving_server.serve \
+    --model ./ResNet50_vd_serving \
+    --op GeneralClasOp \
+    --port 9292 &
+fi
diff --git a/deploy/paddleserving/test_cpp_serving_client.py b/deploy/paddleserving/test_cpp_serving_client.py
index 50794b363767c8236ccca1001a441b535a9f9db3..ba5399c90dcd5e0701df26e2d2f8337a4105ab51 100644
--- a/deploy/paddleserving/test_cpp_serving_client.py
+++ b/deploy/paddleserving/test_cpp_serving_client.py
@@ -12,16 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
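+# Client for the C++ serving pipeline above: the image is sent as a base64
+# string and is decoded and preprocessed server-side by the GeneralClasOp.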
-import sys +import base64 +import time + from paddle_serving_client import Client -#app -from paddle_serving_app.reader import Sequential, URL2Image, Resize -from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize -import time + +def bytes_to_base64(image: bytes) -> str: + """encode bytes into base64 string + """ + return base64.b64encode(image).decode('utf8') + client = Client() -client.load_client_config("./ResNet50_vd_serving/serving_server_conf.prototxt") +client.load_client_config("./ResNet50_client/serving_client_conf.prototxt") client.connect(["127.0.0.1:9292"]) label_dict = {} @@ -31,22 +35,17 @@ with open("imagenet.label") as fin: label_dict[label_idx] = line.strip() label_idx += 1 -#preprocess -seq = Sequential([ - URL2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), - Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) -]) - -start = time.time() -image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg" +image_file = "./daisy.jpg" for i in range(1): - img = seq(image_file) - fetch_map = client.predict( - feed={"inputs": img}, fetch=["prediction"], batch=False) - - prob = max(fetch_map["prediction"][0]) - label = label_dict[fetch_map["prediction"][0].tolist().index(prob)].strip( - ).replace(",", "") - print("prediction: {}, probability: {}".format(label, prob)) -end = time.time() -print(end - start) + start = time.time() + with open(image_file, 'rb') as img_file: + image_data = img_file.read() + image = bytes_to_base64(image_data) + fetch_dict = client.predict( + feed={"inputs": image}, fetch=["prediction"], batch=False) + prob = max(fetch_dict["prediction"][0]) + label = label_dict[fetch_dict["prediction"][0].tolist().index( + prob)].strip().replace(",", "") + print("prediction: {}, probability: {}".format(label, prob)) + end = time.time() + print(end - start) diff --git a/deploy/python/postprocess.py b/deploy/python/postprocess.py index d26cbaa9a8558ffb7f96115eef0a0bd9481fe47a..23a803e284361e98b60f193c450318536d992937 100644 --- a/deploy/python/postprocess.py +++ b/deploy/python/postprocess.py @@ -53,6 +53,34 @@ class PostProcesser(object): return rtn +class ThreshOutput(object): + def __init__(self, threshold, label_0="0", label_1="1"): + self.threshold = threshold + self.label_0 = label_0 + self.label_1 = label_1 + + def __call__(self, x, file_names=None): + y = [] + for idx, probs in enumerate(x): + score = probs[1] + if score < self.threshold: + result = { + "class_ids": [0], + "scores": [1 - score], + "label_names": [self.label_0] + } + else: + result = { + "class_ids": [1], + "scores": [score], + "label_names": [self.label_1] + } + if file_names is not None: + result["file_name"] = file_names[idx] + y.append(result) + return y + + class Topk(object): def __init__(self, topk=1, class_id_map_file=None): assert isinstance(topk, (int, )) @@ -159,3 +187,136 @@ class Binarize(object): byte[:, i:i + 1] = np.dot(x[:, i * 8:(i + 1) * 8], self.unit) return byte + + +class PersonAttribute(object): + def __init__(self, + threshold=0.5, + glasses_threshold=0.3, + hold_threshold=0.6): + self.threshold = threshold + self.glasses_threshold = glasses_threshold + self.hold_threshold = hold_threshold + + def __call__(self, batch_preds, file_names=None): + # postprocess output of predictor + age_list = ['AgeLess18', 'Age18-60', 'AgeOver60'] + direct_list = ['Front', 'Side', 'Back'] + bag_list = ['HandBag', 'ShoulderBag', 'Backpack'] + upper_list = ['UpperStride', 'UpperLogo', 'UpperPlaid', 
'UpperSplice'] + lower_list = [ + 'LowerStripe', 'LowerPattern', 'LongCoat', 'Trousers', 'Shorts', + 'Skirt&Dress' + ] + batch_res = [] + for res in batch_preds: + res = res.tolist() + label_res = [] + # gender + gender = 'Female' if res[22] > self.threshold else 'Male' + label_res.append(gender) + # age + age = age_list[np.argmax(res[19:22])] + label_res.append(age) + # direction + direction = direct_list[np.argmax(res[23:])] + label_res.append(direction) + # glasses + glasses = 'Glasses: ' + if res[1] > self.glasses_threshold: + glasses += 'True' + else: + glasses += 'False' + label_res.append(glasses) + # hat + hat = 'Hat: ' + if res[0] > self.threshold: + hat += 'True' + else: + hat += 'False' + label_res.append(hat) + # hold obj + hold_obj = 'HoldObjectsInFront: ' + if res[18] > self.hold_threshold: + hold_obj += 'True' + else: + hold_obj += 'False' + label_res.append(hold_obj) + # bag + bag = bag_list[np.argmax(res[15:18])] + bag_score = res[15 + np.argmax(res[15:18])] + bag_label = bag if bag_score > self.threshold else 'No bag' + label_res.append(bag_label) + # upper + upper_res = res[4:8] + upper_label = 'Upper:' + sleeve = 'LongSleeve' if res[3] > res[2] else 'ShortSleeve' + upper_label += ' {}'.format(sleeve) + for i, r in enumerate(upper_res): + if r > self.threshold: + upper_label += ' {}'.format(upper_list[i]) + label_res.append(upper_label) + # lower + lower_res = res[8:14] + lower_label = 'Lower: ' + has_lower = False + for i, l in enumerate(lower_res): + if l > self.threshold: + lower_label += ' {}'.format(lower_list[i]) + has_lower = True + if not has_lower: + lower_label += ' {}'.format(lower_list[np.argmax(lower_res)]) + + label_res.append(lower_label) + # shoe + shoe = 'Boots' if res[14] > self.threshold else 'No boots' + label_res.append(shoe) + + threshold_list = [0.5] * len(res) + threshold_list[1] = self.glasses_threshold + threshold_list[18] = self.hold_threshold + pred_res = (np.array(res) > np.array(threshold_list) + ).astype(np.int8).tolist() + batch_res.append({"attributes": label_res, "output": pred_res}) + return batch_res + + +class VehicleAttribute(object): + def __init__(self, color_threshold=0.5, type_threshold=0.5): + self.color_threshold = color_threshold + self.type_threshold = type_threshold + self.color_list = [ + "yellow", "orange", "green", "gray", "red", "blue", "white", + "golden", "brown", "black" + ] + self.type_list = [ + "sedan", "suv", "van", "hatchback", "mpv", "pickup", "bus", + "truck", "estate" + ] + + def __call__(self, batch_preds, file_names=None): + # postprocess output of predictor + batch_res = [] + for res in batch_preds: + res = res.tolist() + label_res = [] + color_idx = np.argmax(res[:10]) + type_idx = np.argmax(res[10:]) + if res[color_idx] >= self.color_threshold: + color_info = f"Color: ({self.color_list[color_idx]}, prob: {res[color_idx]})" + else: + color_info = "Color unknown" + + if res[type_idx + 10] >= self.type_threshold: + type_info = f"Type: ({self.type_list[type_idx]}, prob: {res[type_idx + 10]})" + else: + type_info = "Type unknown" + + label_res = f"{color_info}, {type_info}" + + threshold_list = [self.color_threshold + ] * 10 + [self.type_threshold] * 9 + pred_res = (np.array(res) > np.array(threshold_list) + ).astype(np.int8).tolist() + batch_res.append({"attributes": label_res, "output": pred_res}) + return batch_res diff --git a/deploy/python/predict_cls.py b/deploy/python/predict_cls.py index 574caa3e73bffee4fbf86224f5d91bc7965694b1..49bf62fa3060b9336a3438b2ee5c25b2bac49667 100644 --- 
a/deploy/python/predict_cls.py +++ b/deploy/python/predict_cls.py @@ -49,10 +49,15 @@ class ClsPredictor(Predictor): pid = os.getpid() size = config["PreProcess"]["transform_ops"][1]["CropImage"][ "size"] + if config["Global"].get("use_int8", False): + precision = "int8" + elif config["Global"].get("use_fp16", False): + precision = "fp16" + else: + precision = "fp32" self.auto_logger = auto_log.AutoLogger( model_name=config["Global"].get("model_name", "cls"), - model_precision='fp16' - if config["Global"]["use_fp16"] else 'fp32', + model_precision=precision, batch_size=config["Global"].get("batch_size", 1), data_shape=[3, size, size], save_path=config["Global"].get("save_log_path", @@ -133,13 +138,20 @@ def main(config): continue batch_results = cls_predictor.predict(batch_imgs) for number, result_dict in enumerate(batch_results): - filename = batch_names[number] - clas_ids = result_dict["class_ids"] - scores_str = "[{}]".format(", ".join("{:.2f}".format( - r) for r in result_dict["scores"])) - label_names = result_dict["label_names"] - print("{}:\tclass id(s): {}, score(s): {}, label_name(s): {}". - format(filename, clas_ids, scores_str, label_names)) + if "PersonAttribute" in config[ + "PostProcess"] or "VehicleAttribute" in config[ + "PostProcess"]: + filename = batch_names[number] + print("{}:\t {}".format(filename, result_dict)) + else: + filename = batch_names[number] + clas_ids = result_dict["class_ids"] + scores_str = "[{}]".format(", ".join("{:.2f}".format( + r) for r in result_dict["scores"])) + label_names = result_dict["label_names"] + print( + "{}:\tclass id(s): {}, score(s): {}, label_name(s): {}". + format(filename, clas_ids, scores_str, label_names)) batch_imgs = [] batch_names = [] if cls_predictor.benchmark: diff --git a/deploy/python/predict_det.py b/deploy/python/predict_det.py index e4e0a24a6dbc6c62f82810c865096f768ebd182b..37a7bf5018c3b5dc78e897b532303f70b0d3957d 100644 --- a/deploy/python/predict_det.py +++ b/deploy/python/predict_det.py @@ -128,13 +128,10 @@ class DetPredictor(Predictor): results = [] if reduce(lambda x, y: x * y, np_boxes.shape) < 6: print('[WARNNING] No object detected.') - results = np.array([]) else: - results = np_boxes - - results = self.parse_det_results(results, - self.config["Global"]["threshold"], - self.config["Global"]["labe_list"]) + results = self.parse_det_results( + np_boxes, self.config["Global"]["threshold"], + self.config["Global"]["label_list"]) return results diff --git a/deploy/python/preprocess.py b/deploy/python/preprocess.py index 1da32ad6e38028daf1a6708904c4fcdb6089a2f9..6952708337f8c0fb353d6c9f98bc7b91882163e8 100644 --- a/deploy/python/preprocess.py +++ b/deploy/python/preprocess.py @@ -27,6 +27,7 @@ import cv2 import numpy as np import importlib from PIL import Image +from paddle.vision.transforms import ToTensor, Normalize from python.det_preprocess import DetNormalizeImage, DetPadStride, DetPermute, DetResize @@ -53,13 +54,14 @@ def create_operators(params): class UnifiedResize(object): - def __init__(self, interpolation=None, backend="cv2"): + def __init__(self, interpolation=None, backend="cv2", return_numpy=True): _cv2_interp_from_str = { 'nearest': cv2.INTER_NEAREST, 'bilinear': cv2.INTER_LINEAR, 'area': cv2.INTER_AREA, 'bicubic': cv2.INTER_CUBIC, - 'lanczos': cv2.INTER_LANCZOS4 + 'lanczos': cv2.INTER_LANCZOS4, + 'random': (cv2.INTER_LINEAR, cv2.INTER_CUBIC) } _pil_interp_from_str = { 'nearest': Image.NEAREST, @@ -67,13 +69,26 @@ class UnifiedResize(object): 'bicubic': Image.BICUBIC, 'box': Image.BOX, 'lanczos': 
Image.LANCZOS, - 'hamming': Image.HAMMING + 'hamming': Image.HAMMING, + 'random': (Image.BILINEAR, Image.BICUBIC) } - def _pil_resize(src, size, resample): - pil_img = Image.fromarray(src) + def _cv2_resize(src, size, resample): + if isinstance(resample, tuple): + resample = random.choice(resample) + return cv2.resize(src, size, interpolation=resample) + + def _pil_resize(src, size, resample, return_numpy=True): + if isinstance(resample, tuple): + resample = random.choice(resample) + if isinstance(src, np.ndarray): + pil_img = Image.fromarray(src) + else: + pil_img = src pil_img = pil_img.resize(size, resample) - return np.asarray(pil_img) + if return_numpy: + return np.asarray(pil_img) + return pil_img if backend.lower() == "cv2": if isinstance(interpolation, str): @@ -81,11 +96,12 @@ class UnifiedResize(object): # compatible with opencv < version 4.4.0 elif interpolation is None: interpolation = cv2.INTER_LINEAR - self.resize_func = partial(cv2.resize, interpolation=interpolation) + self.resize_func = partial(_cv2_resize, resample=interpolation) elif backend.lower() == "pil": if isinstance(interpolation, str): interpolation = _pil_interp_from_str[interpolation.lower()] - self.resize_func = partial(_pil_resize, resample=interpolation) + self.resize_func = partial( + _pil_resize, resample=interpolation, return_numpy=return_numpy) else: logger.warning( f"The backend of Resize only support \"cv2\" or \"PIL\". \"f{backend}\" is unavailable. Use \"cv2\" instead." @@ -93,6 +109,8 @@ class UnifiedResize(object): self.resize_func = cv2.resize def __call__(self, src, size): + if isinstance(size, list): + size = tuple(size) return self.resize_func(src, size) @@ -137,7 +155,8 @@ class ResizeImage(object): size=None, resize_short=None, interpolation=None, - backend="cv2"): + backend="cv2", + return_numpy=True): if resize_short is not None and resize_short > 0: self.resize_short = resize_short self.w = None @@ -151,10 +170,18 @@ class ResizeImage(object): 'both 'size' and 'resize_short' are None") self._resize_func = UnifiedResize( - interpolation=interpolation, backend=backend) + interpolation=interpolation, + backend=backend, + return_numpy=return_numpy) def __call__(self, img): - img_h, img_w = img.shape[:2] + if isinstance(img, np.ndarray): + # numpy input + img_h, img_w = img.shape[:2] + else: + # PIL image input + img_w, img_h = img.size + if self.resize_short is not None: percent = float(self.resize_short) / min(img_w, img_h) w = int(round(img_w * percent)) diff --git a/deploy/slim/quant_post_static.py b/deploy/slim/quant_post_static.py index 5c8469794ad29e18dad15f985b611e423fd4b474..4e53b24119a1411be9ac93aecd3a48995d948346 100644 --- a/deploy/slim/quant_post_static.py +++ b/deploy/slim/quant_post_static.py @@ -41,8 +41,11 @@ def main(): 'inference.pdmodel')) and os.path.exists( os.path.join(config["Global"]["save_inference_dir"], 'inference.pdiparams')) + if "Query" in config["DataLoader"]["Eval"]: + config["DataLoader"]["Eval"] = config["DataLoader"]["Eval"]["Query"] config["DataLoader"]["Eval"]["sampler"]["batch_size"] = 1 config["DataLoader"]["Eval"]["loader"]["num_workers"] = 0 + init_logger() device = paddle.set_device("cpu") train_dataloader = build_dataloader(config["DataLoader"], "Eval", device, @@ -67,6 +70,7 @@ def main(): quantize_model_path=os.path.join( config["Global"]["save_inference_dir"], "quant_post_static_model"), sample_generator=sample_generator(train_dataloader), + batch_size=config["DataLoader"]["Eval"]["sampler"]["batch_size"], batch_nums=10) diff --git 
diff --git a/deploy/utils/predictor.py b/deploy/utils/predictor.py
index 7fd1d6dccb61b86f1fece2e3a909c7005f93ca8a..9a38ccd18981c1ddd5dfc75152fa1d31f71d2b06 100644
--- a/deploy/utils/predictor.py
+++ b/deploy/utils/predictor.py
@@ -42,8 +42,22 @@ class Predictor(object):
     def create_paddle_predictor(self, args, inference_model_dir=None):
         if inference_model_dir is None:
             inference_model_dir = args.inference_model_dir
-        params_file = os.path.join(inference_model_dir, "inference.pdiparams")
-        model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+        if "inference_int8.pdiparams" in os.listdir(inference_model_dir):
+            params_file = os.path.join(inference_model_dir,
+                                       "inference_int8.pdiparams")
+            model_file = os.path.join(inference_model_dir,
+                                      "inference_int8.pdmodel")
+            assert args.get(
+                "use_fp16", False
+            ) is False, "fp16 mode is not supported for int8 model inference, please set use_fp16 as False during inference."
+        else:
+            params_file = os.path.join(inference_model_dir,
+                                       "inference.pdiparams")
+            model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+            assert args.get(
+                "use_int8", False
+            ) is False, "int8 mode is not supported for fp32 model inference, please set use_int8 as False during inference."
+
         config = Config(model_file, params_file)

         if args.use_gpu:
@@ -63,12 +77,18 @@ class Predictor(object):
         config.disable_glog_info()
         config.switch_ir_optim(args.ir_optim)  # default true
         if args.use_tensorrt:
+            precision = Config.Precision.Float32
+            if args.get("use_int8", False):
+                precision = Config.Precision.Int8
+            elif args.get("use_fp16", False):
+                precision = Config.Precision.Half
+
             config.enable_tensorrt_engine(
-                precision_mode=Config.Precision.Half
-                if args.use_fp16 else Config.Precision.Float32,
+                precision_mode=precision,
                 max_batch_size=args.batch_size,
                 workspace_size=1 << 30,
-                min_subgraph_size=30)
+                min_subgraph_size=30,
+                use_calib_mode=False)

         config.enable_memory_optim()
         # use zero copy
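With these changes, the TensorRT precision is driven by the `use_fp16` / `use_int8` flags in the `Global` section of the inference config. A usage sketch under those assumptions (the config path is illustrative; int8 additionally expects an `inference_int8.pdmodel` / `inference_int8.pdiparams` pair in the model directory, as the code above checks):

```bash
# fp32 TensorRT engine (default precision)
python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_tensorrt=True
# fp16
python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_tensorrt=True -o Global.use_fp16=True
# int8
python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_tensorrt=True -o Global.use_int8=True
```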
diff --git a/docs/en/PULC/PULC_car_exists_en.md b/docs/en/PULC/PULC_car_exists_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..33c0932e6f118d7f9e31650e7d1e9754af19ec17
--- /dev/null
+++ b/docs/en/PULC/PULC_car_exists_en.md
@@ -0,0 +1,457 @@
+# PULC Classification Model of Containing or Not Containing Car
+
+------
+
+## Catalogue
+
+- [1. Introduction](#1)
+- [2. Quick Start](#2)
+  - [2.1 PaddlePaddle Installation](#2.1)
+  - [2.2 PaddleClas Installation](#2.2)
+  - [2.3 Prediction](#2.3)
+- [3. Training, Evaluation and Inference](#3)
+  - [3.1 Installation](#3.1)
+  - [3.2 Dataset](#3.2)
+    - [3.2.1 Dataset Introduction](#3.2.1)
+    - [3.2.2 Getting Dataset](#3.2.2)
+  - [3.3 Training](#3.3)
+  - [3.4 Evaluation](#3.4)
+  - [3.5 Inference](#3.5)
+- [4. Model Compression](#4)
+  - [4.1 SKL-UGI Knowledge Distillation](#4.1)
+    - [4.1.1 Teacher Model Training](#4.1.1)
+    - [4.1.2 Knowledge Distillation Training](#4.1.2)
+- [5. Hyperparameters Searching](#5)
+- [6. Inference Deployment](#6)
+  - [6.1 Getting Paddle Inference Model](#6.1)
+    - [6.1.1 Exporting Paddle Inference Model](#6.1.1)
+    - [6.1.2 Downloading Inference Model](#6.1.2)
+  - [6.2 Prediction with Python](#6.2)
+    - [6.2.1 Image Prediction](#6.2.1)
+    - [6.2.2 Images Prediction](#6.2.2)
+  - [6.3 Deployment with C++](#6.3)
+  - [6.4 Deployment as Service](#6.4)
+  - [6.5 Deployment on Mobile](#6.5)
+  - [6.6 Converting To ONNX and Deployment](#6.6)
+
+## 1. Introduction
+
+This case provides a way for users to quickly build a lightweight, high-precision and practical classification model of whether a car exists in an image, using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in monitoring scenarios, massive data filtering scenarios, etc.
+
+The following table lists the relevant metrics of the models. The first two rows use SwinTransformer_tiny and MobileNetV3_small_x0_35 as backbones for training. In the third to sixth rows, the backbone is replaced by PPLCNet, and the SSLD pretrained model, the EDA strategy and the SKL-UGI knowledge distillation strategy are added step by step.
+
+| Backbone | Tpr(%) | Latency(ms) | Size(M) | Training Strategy |
+|-------|----------------|----------|---------------|---------------|
+| SwinTransformer_tiny | 97.71 | 95.30 | 111 | using ImageNet pretrained model |
+| MobileNetV3_small_x0_35 | 81.23 | 2.85 | 2.7 | using ImageNet pretrained model |
+| PPLCNet_x1_0 | 94.72 | 2.12 | 7.1 | using ImageNet pretrained model |
+| PPLCNet_x1_0 | 95.48 | 2.12 | 7.1 | using SSLD pretrained model |
+| PPLCNet_x1_0 | 95.48 | 2.12 | 7.1 | using SSLD pretrained model + EDA strategy |
+| PPLCNet_x1_0 | 95.92 | 2.12 | 7.1 | using SSLD pretrained model + EDA strategy + SKL-UGI knowledge distillation strategy |
+
+As the table shows, the Tpr is high when the backbone is SwinTransformer_tiny, but inference is slow. Replacing the backbone with the lightweight MobileNetV3_small_x0_35 greatly improves speed, but the Tpr drops sharply. Replacing the backbone with the faster PPLCNet_x1_0 brings a Tpr about 13 percentage points higher than MobileNetV3_small_x0_35, with more than 20% faster inference. On top of that, using the SSLD pretrained model improves the Tpr by about 0.7 percentage points without affecting inference speed, and adding SKL-UGI knowledge distillation improves the Tpr by a further 0.44 percentage points. At this point, the Tpr is close to that of SwinTransformer_tiny, but inference is more than 40 times faster. The training method and deployment instructions of PULC are introduced in detail below.
+
+**Note**:
+
+* For the `Tpr` metric, please refer to [section 3.2](#3.2) for more information.
+* The latency is tested on an Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz with MKLDNN enabled and 10 threads.
+* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and the [PP-LCNet Paper](https://arxiv.org/abs/2109.15099).
+
+## 2. Quick Start
+
+### 2.1 PaddlePaddle Installation
+
+- Run the following command to install PaddlePaddle if CUDA9 or CUDA10 is available.
+
+```bash
+python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
+```
+
+- Run the following command to install PaddlePaddle if no GPU device is available.
+
+```bash
+python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+```
+
+Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more installation information, for example for other versions.
+
+### 2.2 PaddleClas wheel Installation
+
+Install PaddleClas with the following command:
+
+```bash
+pip3 install paddleclas
+```
+
+### 2.3 Prediction
+
+First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip the test demo images.
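+For reference, the same demo data can be fetched from the command line (the URL is the link above):
+
+```bash
+wget https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip
+unzip pulc_demo_imgs.zip
+```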
+
+* Prediction with CLI
+
+```bash
+paddleclas --model_name=car_exists --infer_imgs=pulc_demo_imgs/car_exists/objects365_00001507.jpeg
+```
+
+Results:
+
+```
+>>> result
+class_ids: [1], scores: [0.9871138], label_names: ['contains_car'], filename: pulc_demo_imgs/car_exists/objects365_00001507.jpeg
+Predict complete!
+```
+
+**Note**: To test other images, you only need to specify the `--infer_imgs` argument; a directory containing images is also supported.
+
+* Prediction in Python
+
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="car_exists")
+result = model.predict(input_data="pulc_demo_imgs/car_exists/objects365_00001507.jpeg")
+print(next(result))
+```
+
+**Note**: The `result` returned by `model.predict()` is a generator, so you need to call it with the `next()` function or iterate over it with a `for` loop. Each call predicts a batch of `batch_size` images and returns the results; a batch-prediction sketch follows below. The default `batch_size` is 1, and you can also specify `batch_size` when instantiating the model, such as `model = paddleclas.PaddleClas(model_name="car_exists", batch_size=2)`. The result of the demo above:
+
+```
+>>> result
+[{'class_ids': [1], 'scores': [0.9871138], 'label_names': ['contains_car'], 'filename': 'pulc_demo_imgs/car_exists/objects365_00001507.jpeg'}]
+```
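+As a minimal sketch of batch prediction over the demo directory (the `batch_size=2` setting and loop shape follow the note above; field access assumes the result dicts shown there):
+
+```python
+import paddleclas
+
+# predict() yields one list of result dicts per batch
+model = paddleclas.PaddleClas(model_name="car_exists", batch_size=2)
+for batch in model.predict(input_data="pulc_demo_imgs/car_exists/"):
+    for res in batch:
+        print(res["filename"], res["label_names"], res["scores"])
+```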
+
+## 3. Training, Evaluation and Inference
+
+### 3.1 Installation
+
+Please refer to [Installation](../installation/install_paddleclas_en.md) for a description of the installation process.
+
+### 3.2 Dataset
+
+#### 3.2.1 Dataset Introduction
+
+All datasets used in this case are open-source. The training and validation data are subsets of the [Objects365](https://www.objects365.org/overview.html) data, and ImageNet_val is the [ImageNet-1k](https://www.image-net.org/) validation data.
+
+#### 3.2.2 Getting Dataset
+
+The data used in this case can be obtained by processing the open-source data. The detailed process is as follows:
+
+- Training data: this case processes the annotation file of the Objects365 training data. If an image contains a box labeled "car" whose area is greater than 10% of the whole image, the image is considered to contain a car. If an image contains no vehicle label of any kind (car, bus, and so on), it is considered not to contain a car. After processing, 108629 images were obtained, including 27422 images containing a car and 81207 images not containing a car.
+- Validation data: processed the same way as the training data, but manually checked to remove some wrongly labeled images.
+
+**Note**: the labels of Objects365 are not completely mutually exclusive. For example, an F1 racing car may be labeled "F1 formula" or "car". To reduce interference, we only keep the "car" label as containing a car, and treat images without any vehicle label as not containing a car.
+
+Some images of the processed dataset are shown below:
+
+![](../../images/PULC/docs/car_exists_data_demo.jpeg)
+
+You can also download the processed data directly.
+
+```
+cd path_to_PaddleClas
+```
+
+Enter the `dataset/` directory, then download and unzip the dataset.
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/PULC/car_exists.tar
+tar -xf car_exists.tar
+cd ../
+```
+
+The contents of the `car_exists` directory:
+
+```
+├── objects365_car
+│   ├── objects365_00000039.jpg
+│   ├── objects365_00000099.jpg
+├── ImageNet_val
+│   ├── ILSVRC2012_val_00000001.JPEG
+│   ├── ILSVRC2012_val_00000002.JPEG
+...
+├── train_list.txt
+├── train_list.txt.debug
+├── train_list_for_distill.txt
+├── val_list.txt
+└── val_list.txt.debug
+```
+
+Here, `train_list.txt` and `val_list.txt` are the label files of the training data and validation data respectively, and `train_list.txt.debug` and `val_list.txt.debug` are small subsets of them for debugging. `ImageNet_val/` is the validation data of ImageNet-1k, which is used for SKL-UGI knowledge distillation; its label file is `train_list_for_distill.txt`.
+
+**Note**:
+
+* For the content format of `train_list.txt` and `val_list.txt`, please refer to [Description about Classification Dataset in PaddleClas](../data_preparation/classification_dataset_en.md).
+* For `train_list_for_distill.txt`, please refer to [Knowledge Distillation Label](../advanced_tutorials/distillation/distillation_en.md).
+
+### 3.3 Training
+
+The details of the training config are in `ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml`. The training command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml
+```
+
+The best metric on the validation data is between `0.95` and `0.96`. There may be fluctuation because the dataset is small.
+
+**Note**:
+
+* The metric Tpr describes the True Positive Rate when the False Positive Rate is below a given threshold (1/100 in this case); it is a commonly used metric for binary classification. For the details of Fpr and Tpr, please refer [here](https://en.wikipedia.org/wiki/Receiver_operating_characteristic); a small computation sketch is also given at the end of section 3.5.
+* During evaluation, the best TprAtFpr metric is printed, including the `Fpr`, the `Tpr` and the current `threshold`. `Tpr` is the recall rate under the current `Fpr`; the higher the `Tpr`, the better the model. The `threshold` is used in deployment: it is the classification threshold under the best `Fpr` metric.
+
+### 3.4 Evaluation
+
+After training, you can use the following command to evaluate the model.
+
+```bash
+python3 tools/eval.py \
+    -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model"
+```
+
+In the above command, `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file; you can specify another path if needed.
+
+### 3.5 Inference
+
+After training, you can use the trained model for inference. The command is as follows:
+
+```bash
+python3 tools/infer.py \
+    -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/PPLCNet_x1_0/best_model
+```
+
+The results:
+
+```
+[{'class_ids': [1], 'scores': [0.9871138], 'label_names': ['contains_car'], 'filename': 'deploy/images/PULC/car_exists/objects365_00001507.jpeg'}]
+```
+
+**Note**:
+
+* In the above command, `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file; you can specify another path if needed.
+* The default test image is `deploy/images/PULC/car_exists/objects365_00001507.jpeg`. To test another image, just specify the argument `-o Infer.infer_imgs=path_to_test_image`.
+* The default threshold is `0.5`. If needed, you can specify the argument `Infer.PostProcess.threshold`, such as `-o Infer.PostProcess.threshold=0.9794`. The threshold should be chosen for the specific case; `0.9794` is the best threshold when `Fpr` is less than `1/100` on this validation dataset.
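+For reference, the sketch below shows one way the TprAtFpr metric can be computed from predicted scores and binary labels. It is a minimal illustration with toy data, not the exact evaluation code used by PaddleClas:
+
+```python
+import numpy as np
+
+def tpr_at_fpr(scores, labels, max_fpr=0.01):
+    """Best Tpr over all thresholds whose Fpr stays below max_fpr."""
+    best_tpr, best_thr = 0.0, 1.0
+    for thr in np.unique(scores):
+        pred = scores >= thr
+        fp = np.sum(pred & (labels == 0))
+        tn = np.sum(~pred & (labels == 0))
+        tp = np.sum(pred & (labels == 1))
+        fn = np.sum(~pred & (labels == 1))
+        fpr = fp / max(fp + tn, 1)
+        tpr = tp / max(tp + fn, 1)
+        if fpr <= max_fpr and tpr > best_tpr:
+            best_tpr, best_thr = tpr, thr
+    return best_tpr, best_thr
+
+# toy scores for "contains_car" and ground-truth labels
+scores = np.array([0.99, 0.80, 0.30, 0.95, 0.10])
+labels = np.array([1, 1, 0, 1, 0])
+print(tpr_at_fpr(scores, labels))  # -> (1.0, 0.8) on this toy data
+```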
+
+## 4. Model Compression
+
+### 4.1 SKL-UGI Knowledge Distillation
+
+SKL-UGI is a simple yet effective knowledge distillation algorithm proposed by PaddleClas.
+
+#### 4.1.1 Teacher Model Training
+
+Train the teacher model with the hyperparameters specified in `ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd
+```
+
+The best metric on the validation data is between `0.96` and `0.98`. The best teacher model weights are saved in `output/ResNet101_vd/best_model.pdparams`.
+
+#### 4.1.2 Knowledge Distillation Training
+
+The training strategy is specified in the config file `ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml`: the teacher model is `ResNet101_vd`, the student model is `PPLCNet_x1_0`, and the additional unlabeled training data is the validation data of ImageNet-1k. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+The best metric is between `0.95` and `0.97`. The best student model weights are saved in `output/DistillationModel/best_model_student.pdparams`.
+
+## 5. Hyperparameters Searching
+
+The hyperparameters used in [section 3.2](#3.2) and [section 4.1](#4.1) were obtained with the `Hyperparameters Searching` strategy in PaddleClas. If you want better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to get better hyperparameters.
+
+**Note**: This section is optional. Because the search process takes a long time, run it selectively according to your own needs. If you do not replace the dataset, you can skip this section.
+
+## 6. Inference Deployment
+
+### 6.1 Getting Paddle Inference Model
+
+Paddle Inference is the native inference library of PaddlePaddle, which provides high-performance inference for server deployment. Compared with making predictions directly from the trained model, Paddle Inference can use optimization tools to accelerate prediction and achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information.
+
+Paddle Inference needs a Paddle Inference model for prediction. There are two ways to get one: export it yourself, or download the model provided by PaddleClas, see [Downloading Inference Model](#6.1.2). A loading sketch is shown below.
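+For orientation, here is a minimal, hedged sketch of loading such an inference model with the Paddle Inference Python API and running it on a dummy input. The model path and the 224x224 input size are illustrative assumptions; the actual deploy scripts also handle preprocessing and postprocessing:
+
+```python
+import numpy as np
+from paddle.inference import Config, create_predictor
+
+# illustrative path: the directory produced in section 6.1.1 below
+model_dir = "deploy/models/PPLCNet_x1_0_car_exists_infer"
+config = Config(model_dir + "/inference.pdmodel",
+                model_dir + "/inference.pdiparams")
+config.disable_gpu()          # CPU inference; use config.enable_use_gpu(...) for GPU
+config.enable_memory_optim()
+
+predictor = create_predictor(config)
+input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
+input_handle.copy_from_cpu(np.random.rand(1, 3, 224, 224).astype("float32"))
+predictor.run()
+output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
+print(output_handle.copy_to_cpu())  # raw model output; shape depends on the model
+```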
+
+### 6.1.1 Exporting Paddle Inference Model
+
+The command for exporting the Paddle Inference model is as follows:
+
+```bash
+python3 tools/export_model.py \
+    -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/DistillationModel/best_model_student \
+    -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_car_exists_infer
+```
+
+After running the above command, the inference model files are saved in `deploy/models/PPLCNet_x1_0_car_exists_infer`, as shown below:
+
+```
+├── PPLCNet_x1_0_car_exists_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+**Note**: The best model here comes from knowledge distillation training. If knowledge distillation training is not used, the best model is saved in `output/PPLCNet_x1_0/best_model.pdparams`.
+
+### 6.1.2 Downloading Inference Model
+
+You can also download the inference model directly.
+
+```
+cd deploy/models
+# download and decompress the inference model
+wget https://paddleclas.bj.bcebos.com/models/PULC/car_exists_infer.tar && tar -xf car_exists_infer.tar
+```
+
+After decompression, the `models` directory should look like this:
+
+```
+├── car_exists_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+### 6.2 Prediction with Python
+
+#### 6.2.1 Image Prediction
+
+Return to the `deploy` directory:
+
+```
+cd ../
+```
+
+Run the following command to classify whether there is a car in the image `./images/PULC/car_exists/objects365_00001507.jpeg`.
+
+```shell
+# Use the following command to predict with GPU.
+python3.7 python/predict_cls.py -c configs/PULC/car_exists/inference_car_exists.yaml
+# Use the following command to predict with CPU.
+python3.7 python/predict_cls.py -c configs/PULC/car_exists/inference_car_exists.yaml -o Global.use_gpu=False
+```
+
+The prediction results:
+
+```
+objects365_00001507.jpeg: class id(s): [1], score(s): [0.99], label_name(s): ['contains_car']
+```
+
+**Note**: The default threshold is `0.5`. If needed, you can specify the argument `Infer.PostProcess.threshold`, such as `-o Infer.PostProcess.threshold=0.9794`. The threshold should be chosen for the specific case; `0.9794` is the best threshold when `Fpr` is less than `1/100` on this validation dataset. Please refer to [section 3.3](#3.3) for details.
+
+#### 6.2.2 Images Prediction
+
+To predict all images in a directory, specify the argument `Global.infer_imgs` as the directory path via `-o Global.infer_imgs`. The command is as follows:
+
+```shell
+# Use the following command to predict with GPU; to use CPU instead, add the argument -o Global.use_gpu=False
+python3.7 python/predict_cls.py -c configs/PULC/car_exists/inference_car_exists.yaml -o Global.infer_imgs="./images/PULC/car_exists/"
+```
+
+All prediction results are printed, as shown below.
+
+```
+objects365_00001507.jpeg: class id(s): [1], score(s): [0.99], label_name(s): ['contains_car']
+objects365_00001521.jpeg: class id(s): [0], score(s): [0.99], label_name(s): ['no_car']
+```
+
+In the prediction results above, `contains_car` means that there is a car in the image and `no_car` means that there is no car in the image.
+
+### 6.3 Deployment with C++
+
+PaddleClas provides an example of how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md).
+
+### 6.4 Deployment as Service
+
+Paddle Serving is a flexible, high-performance carrier for machine learning models that supports different protocols such as RESTful, gRPC and bRPC, providing deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer to [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information.
+
+PaddleClas provides an example of how to deploy a service with Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md).
+
+### 6.5 Deployment on Mobile
+
+Paddle-Lite is an open-source deep learning framework designed to make it easy to perform inference on mobile, embedded, and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information.
+
+PaddleClas provides an example of how to deploy on mobile with Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md).
+
+### 6.6 Converting To ONNX and Deployment
+
+Paddle2ONNX supports converting Paddle Inference models to ONNX models, which can then be deployed on different inference engines such as TensorRT, OpenVINO, MNN/TNN, NCNN, and so on. For details about Paddle2ONNX, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX).
+
+PaddleClas provides an example of how to convert a Paddle Inference model to an ONNX model with the paddle2onnx toolkit and predict with the ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details.
diff --git a/docs/en/PULC/PULC_language_classification_en.md b/docs/en/PULC/PULC_language_classification_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7cd5f5db9c01f01c4fbb2299086bc1adcfc98d1
--- /dev/null
+++ b/docs/en/PULC/PULC_language_classification_en.md
@@ -0,0 +1,470 @@
+# PULC Classification Model of Language
+
+------
+
+## Catalogue
+
+- [1. Introduction](#1)
+- [2. Quick Start](#2)
+  - [2.1 PaddlePaddle Installation](#2.1)
+  - [2.2 PaddleClas Installation](#2.2)
+  - [2.3 Prediction](#2.3)
+- [3. Training, Evaluation and Inference](#3)
+  - [3.1 Installation](#3.1)
+  - [3.2 Dataset](#3.2)
+    - [3.2.1 Dataset Introduction](#3.2.1)
+    - [3.2.2 Getting Dataset](#3.2.2)
+  - [3.3 Training](#3.3)
+  - [3.4 Evaluation](#3.4)
+  - [3.5 Inference](#3.5)
+- [4. Model Compression](#4)
+  - [4.1 SKL-UGI Knowledge Distillation](#4.1)
+    - [4.1.1 Teacher Model Training](#4.1.1)
+    - [4.1.2 Knowledge Distillation Training](#4.1.2)
+- [5. Hyperparameters Searching](#5)
+- [6. Inference Deployment](#6)
+  - [6.1 Getting Paddle Inference Model](#6.1)
+    - [6.1.1 Exporting Paddle Inference Model](#6.1.1)
+    - [6.1.2 Downloading Inference Model](#6.1.2)
+  - [6.2 Prediction with Python](#6.2)
+    - [6.2.1 Image Prediction](#6.2.1)
+    - [6.2.2 Images Prediction](#6.2.2)
+  - [6.3 Deployment with C++](#6.3)
+  - [6.4 Deployment as Service](#6.4)
+  - [6.5 Deployment on Mobile](#6.5)
+  - [6.6 Converting To ONNX and Deployment](#6.6)
+
+## 1. Introduction
+
+This case provides a way for users to quickly build a lightweight, high-precision and practical model for classifying the language of text in an image, using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in scenarios involving multilingual OCR processing, such as finance and government affairs.
+The following table lists the relevant metrics of the models. The first two rows use SwinTransformer_tiny and MobileNetV3_small_x0_35 as backbones for training. In the third to sixth rows, the backbone is replaced by PPLCNet, and the SSLD pretrained model, the EDA strategy and the SKL-UGI knowledge distillation strategy are added step by step. When the backbone is replaced with PPLCNet_x1_0, the input shape of the model is changed to [192, 48], and the stride of the network is changed to [2, [2, 1], [2, 1], [2, 1]].
+
+| Backbone | Top1-Acc(%) | Latency(ms) | Size(M) | Training Strategy |
+| ----------------------- | --------- | -------- | ------- | ---------------------------------------------- |
+| SwinTransformer_tiny | 98.12 | 89.09 | 111 | using ImageNet pretrained model |
+| MobileNetV3_small_x0_35 | 95.92 | 2.98 | 3.7 | using ImageNet pretrained model |
+| PPLCNet_x1_0 | 98.35 | 2.58 | 7.1 | using ImageNet pretrained model |
+| PPLCNet_x1_0 | 98.7 | 2.58 | 7.1 | using SSLD pretrained model |
+| PPLCNet_x1_0 | 99.12 | 2.58 | 7.1 | using SSLD pretrained model + EDA strategy |
+| **PPLCNet_x1_0** | **99.26** | **2.58** | **7.1** | using SSLD pretrained model + EDA strategy + SKL-UGI knowledge distillation strategy |
+
+As the table shows, accuracy is high when the backbone is SwinTransformer_tiny, but inference is slow. Replacing the backbone with the lightweight MobileNetV3_small_x0_35 greatly improves speed, but accuracy drops sharply. Replacing the backbone with the faster PPLCNet_x1_0 and changing the input shape and stride of the network brings an accuracy about 2.43 percentage points higher than MobileNetV3_small_x0_35, with more than 20% faster inference. Using the SSLD pretrained model on top of that improves accuracy by about 0.35 percentage points without affecting inference speed, adding the EDA strategy increases accuracy by a further 0.42 percentage points, and adding SKL-UGI knowledge distillation improves it by another 0.14 percentage points. At this point, the accuracy exceeds that of SwinTransformer_tiny while inference is much faster. The training method and deployment instructions of PULC are introduced in detail below.
+
+**Note**:
+
+* The latency is tested on an Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz with MKLDNN enabled and 10 threads.
+* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and the [PP-LCNet Paper](https://arxiv.org/abs/2109.15099).
+
+## 2. Quick Start
+
+### 2.1 PaddlePaddle Installation
+
+- Run the following command to install PaddlePaddle if CUDA9 or CUDA10 is available.
+
+```bash
+python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
+```
+
+- Run the following command to install PaddlePaddle if no GPU device is available.
+
+```bash
+python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+```
+
+Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more installation information, for example for other versions.
+
+### 2.2 PaddleClas wheel Installation
+
+Install PaddleClas with the following command:
+
+```bash
+pip3 install paddleclas
+```
+
+### 2.3 Prediction
+
+First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip the test demo images.
+
+* Prediction with CLI
+
+```bash
+paddleclas --model_name=language_classification --infer_imgs=pulc_demo_imgs/language_classification/word_35404.png
+```
+
+Results:
+
+```
+>>> result
+class_ids: [4, 6], scores: [0.88672, 0.01434], label_names: ['japan', 'korean'], filename: pulc_demo_imgs/language_classification/word_35404.png
+Predict complete!
+```
+
+**Note**: To test other images, you only need to specify the `--infer_imgs` argument; a directory containing images is also supported.
+
+* Prediction in Python
+
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="language_classification")
+result = model.predict(input_data="pulc_demo_imgs/language_classification/word_35404.png")
+print(next(result))
+```
+
+**Note**: The `result` returned by `model.predict()` is a generator, so you need to call it with the `next()` function or iterate over it with a `for` loop. Each call predicts a batch of `batch_size` images and returns the results. The default `batch_size` is 1, and you can also specify `batch_size` when instantiating the model, such as `model = paddleclas.PaddleClas(model_name="language_classification", batch_size=2)`. The result of the demo above:
+
+```
+>>> result
+[{'class_ids': [4, 6], 'scores': [0.88672, 0.01434], 'label_names': ['japan', 'korean'], 'filename': 'pulc_demo_imgs/language_classification/word_35404.png'}]
+```
+
+## 3. Training, Evaluation and Inference
+
+### 3.1 Installation
+
+Please refer to [Installation](../installation/install_paddleclas_en.md) for a description of the installation process.
+
+### 3.2 Dataset
+
+#### 3.2.1 Dataset Introduction
+
+The models we provide are trained with internal data, which is not open-source yet. It is therefore suggested to construct a dataset based on the open-source dataset [Multi-lingual scene text detection and recognition](https://rrc.cvc.uab.es/?ch=15&com=downloads) to experience this case.
+
+Some images of the processed dataset are shown below:
+
+![](../../images/PULC/docs/language_classification_original_data.png)
+
+#### 3.2.2 Getting Dataset
+
+The provided models support classifying 10 languages, as shown in the following list:
+
+`0` : means Arabic
+`1` : means chinese_cht
+`2` : means cyrillic
+`3` : means devanagari
+`4` : means Japanese
+`5` : means ka
+`6` : means Korean
+`7` : means ta
+`8` : means te
+`9` : means Latin
+
+The `Multi-lingual scene text detection and recognition` dataset only includes Arabic, Japanese, Korean and Latin data. In this case, 1600 images of each of the four languages are taken as training data, 300 images as evaluation data, and 400 images as supplementary data for `SKL-UGI Knowledge Distillation`.
+
+Therefore, for the demo dataset in this case, the language categories are shown in the following list:
+
+`0` : means arabic
+`4` : means japan
+`6` : means korean
+`9` : means latin
+
+**Note**: The images used in this task should be text regions cropped from the original images; only the text-line part is used as image data.
+
+If you want to create your own dataset, you can collect and organize the data of the languages required by your task. You can also download the processed data directly; an illustrative sketch of the label-file format follows.
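+For reference, a minimal sketch of what the label files contain (the paths and ids here are illustrative; the actual files are part of the download below): each line of `train_list.txt` holds an image path and a class id, and `label_list.txt` maps class ids to language names.
+
+```
+# train_list.txt (illustrative)
+img/word_1.png 0
+img/word_2.png 4
+
+# label_list.txt (illustrative)
+0 arabic
+4 japan
+6 korean
+9 latin
+```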
+
+```
+cd path_to_PaddleClas
+```
+
+Enter the `dataset/` directory, then download and unzip the dataset.
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/PULC/language_classification.tar
+tar -xf language_classification.tar
+cd ../
+```
+
+The contents of the `language_classification` directory:
+
+```
+├── img
+│   ├── word_1.png
+│   ├── word_2.png
+...
+├── train_list.txt
+├── train_list_for_distill.txt
+├── test_list.txt
+└── label_list.txt
+```
+
+Here, `img/` contains the 9200 images of the 4 languages, `train_list.txt` and `test_list.txt` are the label files of the training data and validation data respectively, `label_list.txt` is the mapping file for the four languages, and `train_list_for_distill.txt` is the label list of the images used for `SKL-UGI Knowledge Distillation`.
+
+**Note**:
+
+* For the content format of `train_list.txt` and `val_list.txt`, please refer to [Description about Classification Dataset in PaddleClas](../data_preparation/classification_dataset_en.md).
+* For `train_list_for_distill.txt`, please refer to [Knowledge Distillation Label](../advanced_tutorials/distillation/distillation_en.md).
+
+### 3.3 Training
+
+The details of the training config are in `ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml`. The training command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \
+        -o Arch.class_num=4
+```
+
+**Note**: Because the demo dataset has 4 classes, the argument `-o Arch.class_num=4` must be specified to change the number of prediction classes of the model to 4.
+
+### 3.4 Evaluation
+
+After training, you can use the following command to evaluate the model.
+
+```bash
+python3 tools/eval.py \
+    -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" \
+    -o Arch.class_num=4
+```
+
+In the above command, `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file; you can specify another path if needed.
+
+### 3.5 Inference
+
+After training, you can use the trained model for inference. The command is as follows:
+
+```bash
+python3 tools/infer.py \
+    -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" \
+    -o Arch.class_num=4
+```
+
+The results:
+
+```
+[{'class_ids': [4, 9], 'scores': [0.96809, 0.01001], 'file_name': 'deploy/images/PULC/language_classification/word_35404.png', 'label_names': ['japan', 'latin']}]
+```
+
+**Note**:
+
+* In the above command, `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file; you can specify another path if needed.
+* The default test image is `deploy/images/PULC/language_classification/word_35404.png`. To test another image, just specify the argument `-o Infer.infer_imgs=path_to_test_image`.
+* In the prediction results, `japan` means Japanese and `korean` means Korean.
+
+## 4. Model Compression
+
+### 4.1 SKL-UGI Knowledge Distillation
+
+SKL-UGI is a simple yet effective knowledge distillation algorithm proposed by PaddleClas.
+
+#### 4.1.1 Teacher Model Training
+
+Train the teacher model with the hyperparameters specified in `ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd \
+        -o Arch.class_num=4
+```
+
+The best teacher model weights are saved in `output/ResNet101_vd/best_model.pdparams`.
+
+**Note**: Training the ResNet101_vd model requires more GPU memory. If memory is insufficient, you can reduce the learning rate and batch size in the same proportion.
+
+#### 4.1.2 Knowledge Distillation Training
+
+The training strategy is specified in the config file `ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml`: the teacher model is `ResNet101_vd` and the student model is `PPLCNet_x1_0`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model \
+        -o Arch.class_num=4
+```
+
+The best student model weights are saved in `output/DistillationModel/best_model_student.pdparams`.
+
+## 5. Hyperparameters Searching
+
+The hyperparameters used in [section 3.2](#3.2) and [section 4.1](#4.1) were obtained with the `Hyperparameters Searching` strategy in PaddleClas. If you want better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to get better hyperparameters.
+
+**Note**: This section is optional. Because the search process takes a long time, run it selectively according to your own needs. If you do not replace the dataset, you can skip this section.
+
+## 6. Inference Deployment
+
+### 6.1 Getting Paddle Inference Model
+
+Paddle Inference is the native inference library of PaddlePaddle, which provides high-performance inference for server deployment. Compared with making predictions directly from the trained model, Paddle Inference can use optimization tools to accelerate prediction and achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information.
+
+Paddle Inference needs a Paddle Inference model for prediction. There are two ways to get one: export it yourself, or download the model provided by PaddleClas, see [Downloading Inference Model](#6.1.2).
+
+### 6.1.1 Exporting Paddle Inference Model
+
+The command for exporting the Paddle Inference model is as follows:
+
+```bash
+python3 tools/export_model.py \
+    -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/DistillationModel/best_model_student \
+    -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_language_classification_infer
+```
+
+After running the above command, the inference model files are saved in `deploy/models/PPLCNet_x1_0_language_classification_infer`, as shown below:
+
+```
+├── PPLCNet_x1_0_language_classification_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+**Note**: The best model here comes from knowledge distillation training. If knowledge distillation training is not used, the best model is saved in `output/PPLCNet_x1_0/best_model.pdparams`.
+
+### 6.1.2 Downloading Inference Model
+
+You can also download the inference model directly.
+
+```
+cd deploy/models
+# download and decompress the inference model
+wget https://paddleclas.bj.bcebos.com/models/PULC/language_classification_infer.tar && tar -xf language_classification_infer.tar
+```
+
+After decompression, the `models` directory should look like this:
+
+```
+├── language_classification_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+### 6.2 Prediction with Python
+
+#### 6.2.1 Image Prediction
+
+Return to the `deploy` directory:
+
+```
+cd ../
+```
+
+Run the following command to classify the language of the image `./images/PULC/language_classification/word_35404.png`.
+
+```shell
+# Use the following command to predict with GPU.
+python3.7 python/predict_cls.py -c configs/PULC/language_classification/inference_language_classification.yaml
+# Use the following command to predict with CPU.
+python3.7 python/predict_cls.py -c configs/PULC/language_classification/inference_language_classification.yaml -o Global.use_gpu=False
+```
+
+The prediction results:
+
+```
+word_35404.png: class id(s): [4, 6], score(s): [0.89, 0.01], label_name(s): ['japan', 'korean']
+```
+
+**Note**: In the prediction results, `japan` means Japanese and `korean` means Korean.
+
+#### 6.2.2 Images Prediction
+
+To predict all images in a directory, specify the argument `Global.infer_imgs` as the directory path via `-o Global.infer_imgs`. The command is as follows:
+
+```shell
+# Use the following command to predict with GPU; to use CPU instead, add the argument -o Global.use_gpu=False
+python3.7 python/predict_cls.py -c configs/PULC/language_classification/inference_language_classification.yaml -o Global.infer_imgs="./images/PULC/language_classification/"
+```
+
+All prediction results are printed, as shown below.
+
+```
+word_17.png: class id(s): [9, 4], score(s): [0.80, 0.09], label_name(s): ['latin', 'japan']
+word_20.png: class id(s): [0, 4], score(s): [0.91, 0.02], label_name(s): ['arabic', 'japan']
+word_35404.png: class id(s): [4, 6], score(s): [0.89, 0.01], label_name(s): ['japan', 'korean']
+```
+
+In the prediction results above, `japan` means Japanese, `latin` means Latin, `arabic` means Arabic, and `korean` means Korean.
+
+### 6.3 Deployment with C++
+
+PaddleClas provides an example of how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md).
+
+### 6.4 Deployment as Service
+
+Paddle Serving is a flexible, high-performance carrier for machine learning models that supports different protocols such as RESTful, gRPC and bRPC, providing deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer to [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information.
+
+PaddleClas provides an example of how to deploy a service with Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md).
+
+### 6.5 Deployment on Mobile
+
+Paddle-Lite is an open-source deep learning framework designed to make it easy to perform inference on mobile, embedded, and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information.
+
+PaddleClas provides an example of how to deploy on mobile with Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md).
+
+### 6.6 Converting To ONNX and Deployment
+
+Paddle2ONNX supports converting Paddle Inference models to ONNX models, which can then be deployed on different inference engines such as TensorRT, OpenVINO, MNN/TNN, NCNN, and so on. For details about Paddle2ONNX, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX).
+
+PaddleClas provides an example of how to convert a Paddle Inference model to an ONNX model with the paddle2onnx toolkit and predict with the ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details.
diff --git a/docs/en/PULC/PULC_model_list_en.md b/docs/en/PULC/PULC_model_list_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..a7de0ce2c996132e6c882a10f5fcecd22398cc22
--- /dev/null
+++ b/docs/en/PULC/PULC_model_list_en.md
@@ -0,0 +1,25 @@
+# PULC Model Zoo
+
+------
+
+This page provides the PULC model zoo, mainly listing the metrics, storage sizes and download links of the models. The pretrained models can be used for fine-tuning, and the inference models can be used directly for prediction and deployment.
+
+| Model name | Model Description | Metrics | Storage Size | Latency | Download Address |
+| --- | --- | --- | --- | --- | --- |
+| person_exists | [Human Exists Classification](PULC_person_exists_en.md) | 96.23 | 7.0M | 2.58ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/person_exists_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/person_exists_pretrained.pdparams) |
+| person_attribute | [Pedestrian Attribute Classification](PULC_person_attribute_en.md) | 78.59 | 7.2M | 2.01ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/person_attribute_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/person_attribute_pretrained.pdparams) |
+| safety_helmet | [Classification of Whether Wearing Safety Helmet](PULC_safety_helmet_en.md) | 99.38 | 7.1M | 2.03ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/safety_helmet_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/safety_helmet_pretrained.pdparams) |
+| traffic_sign | [Traffic Sign Classification](PULC_traffic_sign_en.md) | 98.35 | 8.2M | 2.10ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/traffic_sign_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/traffic_sign_pretrained.pdparams) |
+| vehicle_attribute | [Vehicle Attribute Classification](PULC_vehicle_attribute_en.md) | 90.81 | 7.2M | 2.36ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/vehicle_attribute_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/vehicle_attribute_pretrained.pdparams) |
+| car_exists | [Car Exists Classification](PULC_car_exists_en.md) | 95.92 | 7.1M | 2.38ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/car_exists_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/car_exists_pretrained.pdparams) |
+| text_image_orientation | [Text Image Orientation Classification](PULC_text_image_orientation_en.md) | 99.06 | 7.1M | 2.16ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/text_image_orientation_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/text_image_orientation_pretrained.pdparams) |
+| textline_orientation | [Text-line Orientation Classification](PULC_textline_orientation_en.md) | 96.01 | 7.0M | 2.72ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/textline_orientation_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/textline_orientation_pretrained.pdparams) |
+| language_classification | [Language Classification](PULC_language_classification_en.md) | 99.26 | 7.1M | 2.58ms | [inference model](https://paddleclas.bj.bcebos.com/models/PULC/inference/language_classification_infer.tar) / [pretrained model](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/language_classification_pretrained.pdparams) |
+
+**Note:**
+
+* The backbone of all the above models is PPLCNet_x1_0. The differing sizes of some models are caused by the different output sizes of the classification layer. The inference time is tested on an Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz with the MKLDNN acceleration strategy turned on and 10 threads; there may be slight fluctuations during the speed test.
+* The evaluation metric of person_exists, safety_helmet and car_exists is TprAtFpr; that of person_attribute and vehicle_attribute is ma; that of traffic_sign, text_image_orientation, textline_orientation and language_classification is Top-1 Acc.
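+As a usage note, the pretrained models in the table can initialize fine-tuning on your own data. A hedged sketch (the URL is taken from the table; `Global.pretrained_model` takes the weight path without the `.pdparams` suffix, following the conventions used elsewhere in these docs):
+
+```bash
+# download pretrained weights (person_exists as an example)
+wget https://paddleclas.bj.bcebos.com/models/PULC/pretrained/person_exists_pretrained.pdparams
+# fine-tune from the downloaded weights
+python3 tools/train.py \
+    -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=./person_exists_pretrained
+```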
diff --git a/docs/en/PULC/PULC_person_attribute_en.md b/docs/en/PULC/PULC_person_attribute_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..173313aad1a684289f3a6825cdf73ea01493847d
--- /dev/null
+++ b/docs/en/PULC/PULC_person_attribute_en.md
@@ -0,0 +1,448 @@
+# PULC Recognition Model of Person Attribute
+
+------
+
+## Catalogue
+
+- [1. Introduction](#1)
+- [2. Quick Start](#2)
+  - [2.1 PaddlePaddle Installation](#2.1)
+  - [2.2 PaddleClas Installation](#2.2)
+  - [2.3 Prediction](#2.3)
+- [3. Training, Evaluation and Inference](#3)
+  - [3.1 Installation](#3.1)
+  - [3.2 Dataset](#3.2)
+    - [3.2.1 Dataset Introduction](#3.2.1)
+    - [3.2.2 Getting Dataset](#3.2.2)
+  - [3.3 Training](#3.3)
+  - [3.4 Evaluation](#3.4)
+  - [3.5 Inference](#3.5)
+- [4. Model Compression](#4)
+  - [4.1 SKL-UGI Knowledge Distillation](#4.1)
+    - [4.1.1 Teacher Model Training](#4.1.1)
+    - [4.1.2 Knowledge Distillation Training](#4.1.2)
+- [5. Hyperparameters Searching](#5)
+- [6. Inference Deployment](#6)
+  - [6.1 Getting Paddle Inference Model](#6.1)
+    - [6.1.1 Exporting Paddle Inference Model](#6.1.1)
+    - [6.1.2 Downloading Inference Model](#6.1.2)
+  - [6.2 Prediction with Python](#6.2)
+    - [6.2.1 Image Prediction](#6.2.1)
+    - [6.2.2 Images Prediction](#6.2.2)
+  - [6.3 Deployment with C++](#6.3)
+  - [6.4 Deployment as Service](#6.4)
+  - [6.5 Deployment on Mobile](#6.5)
+  - [6.6 Converting To ONNX and Deployment](#6.6)
+
+## 1. Introduction
+
+This case provides a way for users to quickly build a lightweight, high-precision and practical classification model of person attributes using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in pedestrian analysis scenarios, pedestrian tracking scenarios, etc.
+
+The following table lists the relevant metrics of the models. The first three rows use Res2Net200_vd_26w_4s, SwinTransformer_tiny and MobileNetV3_small_x0_35 as backbones for training. In the fourth to seventh rows, the backbone is replaced by PPLCNet, and the SSLD pretrained model, the EDA strategy and the SKL-UGI knowledge distillation strategy are added step by step.
+
+| Backbone | ma(%) | Latency(ms) | Size(M) | Training Strategy |
+|-------|-----------|----------|---------------|---------------|
+| Res2Net200_vd_26w_4s | 81.25 | 77.51 | 293 | using ImageNet pretrained |
+| SwinTransformer_tiny | 80.17 | 89.51 | 111 | using ImageNet pretrained |
+| MobileNetV3_small_x0_35 | 70.79 | 2.90 | 1.7 | using ImageNet pretrained |
+| PPLCNet_x1_0 | 76.31 | 2.01 | 7.1 | using ImageNet pretrained |
+| PPLCNet_x1_0 | 77.31 | 2.01 | 7.1 | using SSLD pretrained |
+| PPLCNet_x1_0 | 77.71 | 2.01 | 7.1 | using SSLD pretrained + EDA strategy |
+| PPLCNet_x1_0 | 78.59 | 2.01 | 7.1 | using SSLD pretrained + EDA strategy + SKL-UGI knowledge distillation strategy |
+
+As the table shows, the ma metric is high when the backbone is Res2Net200_vd_26w_4s or SwinTransformer_tiny, but inference is slow. Replacing the backbone with the lightweight MobileNetV3_small_x0_35 greatly improves speed, but the ma metric drops sharply. Replacing the backbone with the faster PPLCNet_x1_0 brings an ma metric about 5.5 percentage points higher than MobileNetV3_small_x0_35, with more than 20% faster inference. Using the SSLD pretrained model on top of that improves the ma metric by about 1 percentage point without affecting inference speed, adding the EDA strategy increases it by a further 0.4 percentage points, and adding SKL-UGI knowledge distillation improves it by another 0.88 percentage points. At this point, the ma metric of PPLCNet_x1_0 is only 1.58 percentage points lower than that of SwinTransformer_tiny, but inference is more than 44 times faster. The training method and deployment instructions of PULC are introduced in detail below.
+
+**Note**:
+
+* The latency is tested on an Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz with MKLDNN enabled and 10 threads.
+* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and the [PP-LCNet Paper](https://arxiv.org/abs/2109.15099).
+
+## 2. Quick Start
+
+### 2.1 PaddlePaddle Installation
+
+- Run the following command to install PaddlePaddle if CUDA9 or CUDA10 is available.
+
+```bash
+python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
+```
+
+- Run the following command to install PaddlePaddle if no GPU device is available.
+
+```bash
+python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+```
+
+Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more installation information, for example for other versions.
+
+### 2.2 PaddleClas wheel Installation
+
+Install PaddleClas with the following command:
+
+```bash
+pip3 install paddleclas
+```
+
+### 2.3 Prediction
+
+First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip the test demo images.
+
+* Prediction with CLI
+
+```bash
+paddleclas --model_name=person_attribute --infer_imgs=pulc_demo_imgs/person_attribute/090004.jpg
+```
+
+Results:
+
+```
+>>> result
+attributes: ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], output: [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1], filename: pulc_demo_imgs/person_attribute/090004.jpg
+Predict complete!
+```
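+In each result, `output` is a 26-dimensional 0/1 vector over the PA100K attribute slots and `attributes` is its human-readable summary. A minimal sketch of consuming such a result dict (the dict literal below abbreviates the output above; the filtering rule is illustrative):
+
+```python
+# a result dict shaped like the output above (values abbreviated)
+result = {
+    "attributes": ['Male', 'Age18-60', 'Back', 'Backpack', 'Lower: Trousers'],
+    "output": [0, 0, 0, 1, 0, 0, 1, 0],  # truncated; the real vector has 26 slots
+    "filename": "pulc_demo_imgs/person_attribute/090004.jpg",
+}
+
+# e.g. select images whose readable attributes mention a backpack
+results = [result]  # in practice, collected from the predictions
+with_backpack = [r["filename"] for r in results
+                 if any("Backpack" in a for a in r["attributes"])]
+print(with_backpack)
+```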
+
+**Note**: To test other images, you only need to specify the `--infer_imgs` argument; a directory containing images is also supported.
+
+* Prediction in Python
+
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="person_attribute")
+result = model.predict(input_data="pulc_demo_imgs/person_attribute/090004.jpg")
+print(next(result))
+```
+
+**Note**: The `result` returned by `model.predict()` is a generator, so you need to call it with the `next()` function or iterate over it with a `for` loop. Each call predicts a batch of `batch_size` images and returns the results. The default `batch_size` is 1, and you can also specify `batch_size` when instantiating the model, such as `model = paddleclas.PaddleClas(model_name="person_attribute", batch_size=2)`. The result of the demo above:
+
+```
+>>> result
+[{'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1], 'filename': 'pulc_demo_imgs/person_attribute/090004.jpg'}]
+```
+
+## 3. Training, Evaluation and Inference
+
+### 3.1 Installation
+
+Please refer to [Installation](../installation/install_paddleclas_en.md) for a description of the installation process.
+
+### 3.2 Dataset
+
+#### 3.2.1 Dataset Introduction
+
+The data used in this case is the [pa100k dataset](https://www.v7labs.com/open-datasets/pa-100k).
+
+#### 3.2.2 Getting Dataset
+
+Some images of the processed dataset are shown below:
+
+![](../../images/PULC/docs/person_attribute_data_demo.png)
+
+We converted the data into PaddleClas's multi-label data format, which can be downloaded directly.
+
+```
+cd path_to_PaddleClas
+```
+
+Enter the `dataset/` directory, then download and unzip the dataset.
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/PULC/pa100k.tar
+tar -xf pa100k.tar
+cd ../
+```
+
+The contents of the `pa100k` directory:
+
+```
+pa100k
+├── train
+│   ├── 000001.jpg
+│   ├── 000002.jpg
+...
+├── val
+│   ├── 080001.jpg
+│   ├── 080002.jpg
+...
+├── test
+│   ├── 090001.jpg
+│   ├── 090002.jpg
+...
+...
+├── train_list.txt
+├── train_val_list.txt
+├── val_list.txt
+├── test_list.txt
+```
+
+Here, `train/`, `val/` and `test/` are the training set, validation set and test set respectively, and `train_list.txt`, `val_list.txt` and `test_list.txt` are their label files. In this example, `test_list.txt` is not used for now.
+
+### 3.3 Training
+
+The details of the training config are in `./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml`. The training command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml
+```
+
+The best metric for the validation set is around `77.71%` (the dataset is small, and the metric generally fluctuates around 0.3%).
+
+### 3.4 Evaluation
+
+After training, you can use the following command to evaluate the model.
+
+```bash
+python3 tools/eval.py \
+    -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model"
+```
+
+In the above command, `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file; you can specify another path if needed.
+
+### 3.5 Inference
+
+After training, you can use the trained model for inference. The command is as follows:
+
+```bash
+python3 tools/infer.py \
+    -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/PPLCNet_x1_0/best_model
+```
+
+The results:
+
+```
+[{'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]}]
+```
+
+**Note**:
+
+* In the above command, `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file; you can specify another path if needed.
+* The default test image is `deploy/images/PULC/person_attribute/090004.jpg`. To test another image, just specify the argument `-o Infer.infer_imgs=path_to_test_image`.
+
+## 4. Model Compression
+
+### 4.1 SKL-UGI Knowledge Distillation
+
+SKL-UGI is a simple yet effective knowledge distillation algorithm proposed by PaddleClas.
+
+#### 4.1.1 Teacher Model Training
+
+Train the teacher model with the hyperparameters specified in `ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd
+```
+
+The best metric for the validation set is around `80.10%`. The best teacher model weights are saved in `output/ResNet101_vd/best_model.pdparams`.
+
+#### 4.1.2 Knowledge Distillation Training
+
+The training strategy is specified in the config file `ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml`: the teacher model is `ResNet101_vd` and the student model is `PPLCNet_x1_0`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+The best metric for the validation set is around `78.5%`. The best student model weights are saved in `output/DistillationModel/best_model_student.pdparams`.
+
+## 5. Hyperparameters Searching
+
+The hyperparameters used in [section 3.2](#3.2) and [section 4.1](#4.1) were obtained with the `Hyperparameters Searching` strategy in PaddleClas. If you want better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to get better hyperparameters.
+
+**Note**: This section is optional. Because the search process takes a long time, run it selectively according to your own needs. If you do not replace the dataset, you can skip this section.
+
+## 6. Inference Deployment
+
+### 6.1 Getting Paddle Inference Model
+
+Paddle Inference is the native inference library of PaddlePaddle, which provides high-performance inference for server deployment. Compared with making predictions directly from the trained model, Paddle Inference can use optimization tools to accelerate prediction and achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information.
+ +Paddle Inference need Paddle Inference Model to predict. Two process provided to get Paddle Inference Model. If want to use the provided by PaddleClas, you can download directly, click [Downloading Inference Model](#6.1.2). + + + +### 6.1.1 Exporting Paddle Inference Model + +The command about exporting Paddle Inference Model is as follow: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_person_attribute_infer +``` + +After running above command, the inference model files would be saved in `PPLCNet_x1_0_person_attribute_infer`, as shown below: + +``` +├── PPLCNet_x1_0_person_attribute_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**Note**: The best model is from knowledge distillation training. If knowledge distillation training is not used, the best model would be saved in `output/PPLCNet_x1_0/best_model.pdparams`. + + + +### 6.1.2 Downloading Inference Model + +You can also download directly. + +``` +cd deploy/models +# download the inference model and decompression +wget https://paddleclas.bj.bcebos.com/models/PULC/person_attribute_infer.tar && tar -xf person_attribute_infer.tar +``` + +After decompression, the directory `models` should be shown below. + +``` +├── person_attribute_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 Prediction with Python + + + +#### 6.2.1 Image Prediction + +Return the directory `deploy`: + +``` +cd ../ +``` + +Run the following command to classify whether there are human in the image `./images/PULC/person_attribute/090004.jpg`. + +```shell +# Use the following command to predict with GPU. +python3.7 python/predict_cls.py -c configs/PULC/person_attribute/inference_person_attribute.yaml -o Global.use_gpu=True +# Use the following command to predict with CPU. +python3.7 python/predict_cls.py -c configs/PULC/person_attribute/inference_person_attribute.yaml -o Global.use_gpu=False +``` + +The prediction results: + +``` +090004.jpg: {'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]} +``` + + + + +#### 6.2.2 Images Prediction + +If you want to predict images in directory, please specify the argument `Global.infer_imgs` as directory path by `-o Global.infer_imgs`. The command is as follow. + +```shell +# Use the following command to predict with GPU. If want to replace with CPU, you can add argument -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/person_attribute/inference_person_attribute.yaml -o Global.infer_imgs="./images/PULC/person_attribute/" +``` + +All prediction results will be printed, as shown below. 
+ +``` +090004.jpg: {'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]} +090007.jpg: {'attributes': ['Female', 'Age18-60', 'Side', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'No bag', 'Upper: ShortSleeve', 'Lower: Skirt&Dress', 'No boots'], 'output': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0]} +``` + +Among the prediction results above, `someone` means that there is a human in the image, `nobody` means that there is no human in the image. + + + +### 6.3 Deployment with C++ + +PaddleClas provides an example about how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md). + + + +### 6.4 Deployment as Service + +Paddle Serving is a flexible, high-performance carrier for machine learning models, and supports different protocol, such as RESTful, gRPC, bRPC and so on, which provides different deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information. + +PaddleClas provides an example about how to deploy as service by Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md). + + + +### 6.5 Deployment on Mobile + +Paddle-Lite is an open source deep learning framework that designed to make easy to perform inference on mobile, embeded, and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information. + +PaddleClas provides an example of how to deploy on mobile by Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md). + + + +### 6.6 Converting To ONNX and Deployment + +Paddle2ONNX support convert Paddle Inference model to ONNX model. And you can deploy with ONNX model on different inference engine, such as TensorRT, OpenVINO, MNN/TNN, NCNN and so on. About Paddle2ONNX details, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX). + +PaddleClas provides an example of how to convert Paddle Inference model to ONNX model by paddle2onnx toolkit and predict by ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details. diff --git a/docs/en/PULC/PULC_person_exists_en.md b/docs/en/PULC/PULC_person_exists_en.md new file mode 100644 index 0000000000000000000000000000000000000000..baf5ce3e4c295a57d928853f5a0b3da1d3c7b366 --- /dev/null +++ b/docs/en/PULC/PULC_person_exists_en.md @@ -0,0 +1,458 @@ +# PULC Classification Model of Someone or Nobody + +------ + +## Catalogue + +- [1. Introduction](#1) +- [2. Quick Start](#2) + - [2.1 PaddlePaddle Installation](#2.1) + - [2.2 PaddleClas Installation](#2.2) + - [2.3 Prediction](#2.3) +- [3. Training, Evaluation and Inference](#3) + - [3.1 Installation](#3.1) + - [3.2 Dataset](#3.2) + - [3.2.1 Dataset Introduction](#3.2.1) + - [3.2.2 Getting Dataset](#3.2.2) + - [3.3 Training](#3.3) + - [3.4 Evaluation](#3.4) + - [3.5 Inference](#3.5) +- [4. Model Compression](#4) + - [4.1 SKL-UGI Knowledge Distillation](#4.1) + - [4.1.1 Teacher Model Training](#4.1.1) + - [4.1.2 Knowledge Distillation Training](#4.1.2) +- [5. SHAS](#5) +- [6. 
Inference Deployment](#6) + - [6.1 Getting Paddle Inference Model](#6.1) + - [6.1.1 Exporting Paddle Inference Model](#6.1.1) + - [6.1.2 Downloading Inference Model](#6.1.2) + - [6.2 Prediction with Python](#6.2) + - [6.2.1 Image Prediction](#6.2.1) + - [6.2.2 Images Prediction](#6.2.2) + - [6.3 Deployment with C++](#6.3) + - [6.4 Deployment as Service](#6.4) + - [6.5 Deployment on Mobile](#6.5) + - [6.6 Converting To ONNX and Deployment](#6.6) + + + +## 1. Introduction + +This case provides a way for users to quickly build a lightweight, high-precision and practical classification model of human exists using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in monitoring scenarios, personnel access control scenarios, massive data filtering scenarios, etc. + +The following table lists the relevant indicators of the model. The first two lines means that using SwinTransformer_tiny and MobileNetV3_small_x0_35 as the backbone to training. The third to sixth lines means that the backbone is replaced by PPLCNet, additional use of EDA strategy and additional use of EDA strategy and SKL-UGI knowledge distillation strategy. + +| Backbone | Tpr(%) | Latency(ms) | Size(M)| Training Strategy | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 95.69 | 95.30 | 111 | using ImageNet pretrained model | +| MobileNetV3_small_x0_35 | 68.25 | 2.85 | 2.6 | using ImageNet pretrained model | +| PPLCNet_x1_0 | 89.57 | 2.12 | 7.0 | using ImageNet pretrained model | +| PPLCNet_x1_0 | 92.10 | 2.12 | 7.0 | using SSLD pretrained model | +| PPLCNet_x1_0 | 93.43 | 2.12 | 7.0 | using SSLD pretrained model + EDA strategy | +| PPLCNet_x1_0 | 96.23 | 2.12 | 7.0 | using SSLD pretrained model + EDA strategy + SKL-UGI knowledge distillation strategy| + +It can be seen that high Tpr can be getted when backbone is SwinTranformer_tiny, but the speed is slow. Replacing backbone with the lightweight model MobileNetV3_small_x0_35, the speed can be greatly improved, but the Tpr will be greatly reduced. Replacing backbone with faster backbone PPLCNet_x1_0, the Tpr is higher more 20 percentage points than MobileNetv3_small_x0_35. At the same time, the speed can be more than 20% faster. After additional using the SSLD pretrained model, the Tpr can be improved by about 2.6 percentage points without affecting the inference speed. Further, additional using the EDA strategy, the Tpr can be increased by 1.3 percentage points. Finally, after additional using the SKL-UGI knowledge distillation, the Tpr can be further improved by 2.8 percentage points. At this point, the Tpr is close to that of SwinTranformer_tiny, but the speed is more than 40 times faster. The training method and deployment instructions of PULC will be introduced in detail below. + +**Note**: + +* About `Tpr` metric, please refer to [3.2 section](#3.2) for more information . +* The Latency is tested on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz. The MKLDNN is enabled and the number of threads is 10. +* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and [PP-LCNet Paper](https://arxiv.org/abs/2109.15099). + + + +## 2. Quick Start + + + +### 2.1 PaddlePaddle Installation + +- Run the following command to install if CUDA9 or CUDA10 is available. + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- Run the following command to install if GPU device is unavailable. 
+ +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more information about installation, for examples other versions. + + + +### 2.2 PaddleClas wheel Installation + +The command of PaddleClas installation as bellow: + +```bash +pip3 install paddleclas +``` + + + +### 2.3 Prediction + +First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip to get the test demo images. + +* Prediction with CLI + +```bash +paddleclas --model_name=person_exists --infer_imgs=pulc_demo_imgs/person_exists/objects365_01780782.jpg +``` + +Results: +``` +>>> result +class_ids: [0], scores: [0.9955421453341842], label_names: ['nobody'], filename: pulc_demo_imgs/person_exists/objects365_01780782.jpg +Predict complete! +``` + +**Note**: If you want to test other images, only need to specify the `--infer_imgs` argument, and the directory containing images is also supported. + +* Prediction in Python + +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="person_exists") +result = model.predict(input_data="pulc_demo_imgs/person_exists/objects365_01780782.jpg") +print(next(result)) +``` + +**Note**: The `result` returned by `model.predict()` is a generator, so you need to use the `next()` function to call it or `for` loop to loop it. And it will predict with `batch_size` size batch and return the prediction results when called. The default `batch_size` is 1, and you also specify the `batch_size` when instantiating, such as `model = paddleclas.PaddleClas(model_name="person_exists", batch_size=2)`. The result of demo above: + +``` +>>> result +[{'class_ids': [0], 'scores': [0.9955421453341842], 'label_names': ['nobody'], 'filename': 'pulc_demo_imgs/person_exists/objects365_01780782.jpg'}] +``` + + + +## 3. Training, Evaluation and Inference + + + +### 3.1 Installation + +Please refer to [Installation](../installation/install_paddleclas_en.md) to get the description about installation. + + + +### 3.2 Dataset + + + +#### 3.2.1 Dataset Introduction + +All datasets used in this case are open source data. Train data is the subset of [MS-COCO](https://cocodataset.org/#overview) training data. And the validation data is the subset of [Object365](https://www.objects365.org/overview.html) training data. ImageNet_val is [ImageNet-1k](https://www.image-net.org/) validation data. + + + +#### 3.2.2 Getting Dataset + +The data used in this case can be getted by processing the open source data. The detailed processes are as follows: + +- Training data. This case deals with the annotation file of MS-COCO data training data. If a certain image contains the label of "person" and the area of this box is greater than 10% in the whole image, it is considered that the image contains human. If there is no label of "person" in a certain image, It is considered that the image does not contain human. After processing, 92964 pieces of available data were obtained, including 39813 images containing human and 53151 images without containing human. +- Validation data: randomly select a small part of data from object365 data, use the better model trained on MS-COCO to predict these data, take the intersection between the prediction results and the data annotation file, and filter the intersection results into the validation set according to the method of obtaining the training set. 
After processing, 27820 pieces of available data were obtained. There are 2255 pieces of data with human and 25565 pieces of data without human. The data visualization of the processed dataset is as follows: + +Some image of the processed dataset is as follows: + +![](../../images/PULC/docs/person_exists_data_demo.png) + +And you can also download the data processed directly. + +``` +cd path_to_PaddleClas +``` + +Enter the `dataset/` directory, download and unzip the dataset. + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/person_exists.tar +tar -xf person_exists.tar +cd ../ +``` + +The datas under `person_exists` directory: + +``` +├── train +│   ├── 000000000009.jpg +│   ├── 000000000025.jpg +... +├── val +│   ├── objects365_01780637.jpg +│   ├── objects365_01780640.jpg +... +├── ImageNet_val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +... +├── train_list.txt +├── train_list.txt.debug +├── train_list_for_distill.txt +├── val_list.txt +└── val_list.txt.debug +``` + +Where `train/` and `val/` are training set and validation set respectively. The `train_list.txt` and `val_list.txt` are label files of training data and validation data respectively. The file `train_list.txt.debug` and `val_list.txt.debug` are subset of `train_list.txt` and `val_list.txt` respectively. `ImageNet_val/` is the validation data of ImageNet-1k, which will be used for SKL-UGI knowledge distillation, and its label file is `train_list_for_distill.txt`. + +**Note**: + +* About the contents format of `train_list.txt` and `val_list.txt`, please refer to [Description about Classification Dataset in PaddleClas](../data_preparation/classification_dataset_en.md). +* About the `train_list_for_distill.txt`, please refer to [Knowledge Distillation Label](../advanced_tutorials/distillation/distillation_en.md). + + + +### 3.3 Training + +The details of training config in `ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml`. The command about training as follows: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml +``` + +The best metric of validation data is between `0.94` and `0.95`. There would be fluctuations because the data size is small. + +**Note**: + +* The metric Tpr, that describe the True Positive Rate when False Positive Rate is less than a certain threshold(1/1000 used in this case), is one of the commonly used metric for binary classification. About the details of Fpr and Tpr, please refer [here](https://en.wikipedia.org/wiki/Receiver_operating_characteristic). +* When evaluation, the best metric TprAtFpr will be printed that include `Fpr`, `Tpr` and the current `threshold`. The `Tpr` means the Recall rate under the current `Fpr`. The `Tpr` higher, the model better. The `threshold` would be used in deployment, which means the classification threshold under best `Fpr` metric. + + + +### 3.4 Evaluation + +After training, you can use the following commands to evaluate the model. + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +Among the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specify the path of the best model weight file. You can specify other path if needed. + + + +### 3.5 Inference + +After training, you can use the model that trained to infer. 
Command is as follow: + +```python +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +The results: + +``` +[{'class_ids': [1], 'scores': [0.9999976], 'label_names': ['someone'], 'file_name': 'deploy/images/PULC/person_exists/objects365_02035329.jpg'}] +``` + +**Note**: + +* Among the above command, argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specify the path of the best model weight file. You can specify other path if needed. +* The default test image is `deploy/images/PULC/person_exists/objects365_02035329.jpg`. And you can test other image, only need to specify the argument `-o Infer.infer_imgs=path_to_test_image`. +* The default threshold is `0.5`. If needed, you can specify the argument `Infer.PostProcess.threshold`, such as: `-o Infer.PostProcess.threshold=0.9794`. And the argument `threshold` is needed to be specified according by specific case. The `0.9794` is the best threshold when `Fpr` is less than `1/1000` in this valuation dataset. + + + +## 4. Model Compression + + + +### 4.1 SKL-UGI Knowledge Distillation + +SKL-UGI is a simple but effective knowledge distillation algrithem proposed by PaddleClas. + + + + + + +#### 4.1.1 Teacher Model Training + +Training the teacher model with hyperparameters specified in `ppcls/configs/PULC/person_exists/PPLCNet/PPLCNet_x1_0.yaml`. The command is as follow: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd +``` + +The best metric of validation data is between `0.96` and `0.98`. The best teacher model weight would be saved in file `output/ResNet101_vd/best_model.pdparams`. + + + +#### 4.1.2 Knowledge Distillation Training + +The training strategy, specified in training config file `ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml`, the teacher model is `ResNet101_vd`, the student model is `PPLCNet_x1_0` and the additional unlabeled training data is validation data of ImageNet1k. The command is as follow: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model +``` + +The best metric is between `0.95` and `0.97`. The best student model weight would be saved in file `output/DistillationModel/best_model_student.pdparams`. + + + +## 5. Hyperparameters Searching + +The hyperparameters used by [3.2 section](#3.2) and [4.1 section](#4.1) are according by `Hyperparameters Searching` in PaddleClas. If you want to get better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to get better hyperparameters. + +**Note**: This section is optional. Because the search process will take a long time, you can selectively run according to your specific. If not replace the dataset, you can ignore this section. + + + +## 6. Inference Deployment + + + +### 6.1 Getting Paddle Inference Model + +Paddle Inference is the original Inference Library of the PaddlePaddle, provides high-performance inference for server deployment. And compared with directly based on the pretrained model, Paddle Inference can use tools to accelerate prediction, so as to achieve better inference performance. 
Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information. + +Paddle Inference need Paddle Inference Model to predict. Two process provided to get Paddle Inference Model. If want to use the provided by PaddleClas, you can download directly, click [Downloading Inference Model](#6.1.2). + + + +### 6.1.1 Exporting Paddle Inference Model + +The command about exporting Paddle Inference Model is as follow: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_person_exists_infer +``` + +After running above command, the inference model files would be saved in `deploy/models/PPLCNet_x1_0_person_exists_infer`, as shown below: + +``` +├── PPLCNet_x1_0_person_exists_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**Note**: The best model is from knowledge distillation training. If knowledge distillation training is not used, the best model would be saved in `output/PPLCNet_x1_0/best_model.pdparams`. + + + +### 6.1.2 Downloading Inference Model + +You can also download directly. + +``` +cd deploy/models +# download the inference model and decompression +wget https://paddleclas.bj.bcebos.com/models/PULC/person_exists_infer.tar && tar -xf person_exists_infer.tar +``` + +After decompression, the directory `models` should be shown below. + +``` +├── person_exists_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 Prediction with Python + + + +#### 6.2.1 Image Prediction + +Return the directory `deploy`: + +``` +cd ../ +``` + +Run the following command to classify whether there are humans in the image `./images/PULC/person_exists/objects365_02035329.jpg`. + +```shell +# Use the following command to predict with GPU. +python3.7 python/predict_cls.py -c configs/PULC/person_exists/inference_person_exists.yaml +# Use the following command to predict with CPU. +python3.7 python/predict_cls.py -c configs/PULC/person_exists/inference_person_exists.yaml -o Global.use_gpu=False +``` + +The prediction results: + +``` +objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone'] +``` + +**Note**: The default threshold is `0.5`. If needed, you can specify the argument `Infer.PostProcess.threshold`, such as: `-o Infer.PostProcess.threshold=0.9794`. And the argument `threshold` is needed to be specified according by specific case. The `0.9794` is the best threshold when `Fpr` is less than `1/1000` in this valuation dataset. Please refer to [3.3 section](#3.3) for details. + + + +#### 6.2.2 Images Prediction + +If you want to predict images in directory, please specify the argument `Global.infer_imgs` as directory path by `-o Global.infer_imgs`. The command is as follow. + +```shell +# Use the following command to predict with GPU. If want to replace with CPU, you can add argument -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/person_exists/inference_person_exists.yaml -o Global.infer_imgs="./images/PULC/person_exists/" +``` + +All prediction results will be printed, as shown below. 
+ +``` +objects365_01780782.jpg: class id(s): [0], score(s): [1.00], label_name(s): ['nobody'] +objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone'] +``` + +Among the prediction results above, `someone` means that there is a human in the image, `nobody` means that there is no human in the image. + + + +### 6.3 Deployment with C++ + +PaddleClas provides an example about how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md). + + + +### 6.4 Deployment as Service + +Paddle Serving is a flexible, high-performance carrier for machine learning models, and supports different protocol, such as RESTful, gRPC, bRPC and so on, which provides different deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information. + +PaddleClas provides an example about how to deploy as service by Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md). + + + +### 6.5 Deployment on Mobile + +Paddle-Lite is an open source deep learning framework that designed to make easy to perform inference on mobile, embeded, and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information. + +PaddleClas provides an example of how to deploy on mobile by Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md). + + + +### 6.6 Converting To ONNX and Deployment + +Paddle2ONNX support convert Paddle Inference model to ONNX model. And you can deploy with ONNX model on different inference engine, such as TensorRT, OpenVINO, MNN/TNN, NCNN and so on. About Paddle2ONNX details, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX). + +PaddleClas provides an example of how to convert Paddle Inference model to ONNX model by paddle2onnx toolkit and predict by ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details. diff --git a/docs/en/PULC/PULC_quickstart_en.md b/docs/en/PULC/PULC_quickstart_en.md new file mode 100644 index 0000000000000000000000000000000000000000..087c359283c0e288db91bc80774163eda336853b --- /dev/null +++ b/docs/en/PULC/PULC_quickstart_en.md @@ -0,0 +1,123 @@ +# PULC Quick Start + +------ + +This document introduces the prediction using PULC series model based on PaddleClas wheel. + +## Catalogue + +- [1. Installation](#1) + - [1.1 PaddlePaddle Installation](#11) + - [1.2 PaddleClas wheel Installation](#12) +- [2. Quick Start](#2) + - [2.1 Predicion with Command Line](#2.1) + - [2.2 Predicion with Python](#2.2) + - [2.3 Supported Model List](#2.3) +- [3. Summary](#3) + + + +## 1. Installation + + + +### 1.1 PaddlePaddle Installation + +- Run the following command to install if CUDA9 or CUDA10 is available. + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- Run the following command to install if GPU device is unavailable. + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more information about installation, for examples other versions. + + + +### 1.2 PaddleClas wheel Installation + +```bash +pip3 install paddleclas +``` + + + +## 2. 
Quick Start + +PaddleClas provides a series of test cases, which contain demos of different scenes about people, cars, OCR, etc. Click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download the data. + + + +### 2.1 Predicion with Command Line + +``` +cd /path/to/pulc_demo_imgs +``` + +The prediction command: + +```bash +paddleclas --model_name=person_exists --infer_imgs=pulc_demo_imgs/person_exists/objects365_01780782.jpg +``` + +Result: + +``` +>>> result +class_ids: [0], scores: [0.9955421453341842], label_names: ['nobody'], filename: pulc_demo_imgs/person_exists/objects365_01780782.jpg +Predict complete! +``` +`Nobody` means there is no one in the image, `someone` means there is someone in the image. Therefore, the prediction result indicates that there is no one in the figure. + +**Note**: The "--infer_imgs" argument specify the image(s) to be predict, and you can also specify a directoy contains images. If use other model, you can specify the `--model_name` argument. Please refer to [2.3 Supported Model List](#2.3) for the supported model list. + + + +### 2.2 Predicion with Python + +You can also use in Python: + +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="person_exists") +result = model.predict(input_data="pulc_demo_imgs/person_exists/objects365_01780782.jpg") +print(next(result)) +``` + +The printed result information: + +``` +>>> result +[{'class_ids': [0], 'scores': [0.9955421453341842], 'label_names': ['nobody'], 'filename': 'pulc_demo_imgs/person_exists/objects365_01780782.jpg'}] +``` + +**Note**: `model.predict()` is a generator, so `next()` or `for` is needed to call it. This would to predict by batch that length is `batch_size`, default by 1. You can specify the argument `batch_size` and `model_name` when instantiating PaddleClas object, for example: `model = paddleclas.PaddleClas(model_name="person_exists", batch_size=2)`. Please refer to [2.3 Supported Model List](#2.3) for the supported model list. + + + +### 2.3 Supported Model List + +The name of PULC series models are as follows: + +| Name | Intro | +| --- | --- | +| person_exists | Human Exists Classification | +| person_attribute | Pedestrian Attribute Classification | +| safety_helmet | Classification of Wheather Wearing Safety Helmet | +| traffic_sign | Traffic Sign Classification | +| vehicle_attribute | Vehicle Attribute Classification | +| car_exists | Car Exists Classification | +| text_image_orientation | Text Image Orientation Classification | +| textline_orientation | Text-line Orientation Classification | +| language_classification | Language Classification | + + + +## 3. Summary + +The PULC series models have been verified to be effective in different scenarios about people, vehicles, OCR, etc. The ultra lightweight model can achieve the accuracy close to SwinTransformer model, and the speed is increased by 40+ times. And PULC also provides the whole process of dataset getting, model training, model compression and deployment. 
Please refer to [Human Exists Classification](PULC_person_exists_en.md)、[Pedestrian Attribute Classification](PULC_person_attribute_en.md)、[Classification of Wheather Wearing Safety Helmet](PULC_safety_helmet_en.md)、[Traffic Sign Classification](PULC_traffic_sign_en.md)、[Vehicle Attribute Classification](PULC_vehicle_attribute_en.md)、[Car Exists Classification](PULC_car_exists_en.md)、[Text Image Orientation Classification](PULC_text_image_orientation_en.md)、[Text-line Orientation Classification](PULC_textline_orientation_en.md)、[Language Classification](PULC_language_classification_en.md) for more information about different scenarios. diff --git a/docs/en/PULC/PULC_safety_helmet_en.md b/docs/en/PULC/PULC_safety_helmet_en.md new file mode 100644 index 0000000000000000000000000000000000000000..d2e5cb32931cdc98b0776f4692e6162e907aa6fa --- /dev/null +++ b/docs/en/PULC/PULC_safety_helmet_en.md @@ -0,0 +1,432 @@ +# PULC Classification Model of Wheather Wearing Safety Helmet or Not + +----- + +## Catalogue + +- [1. Introduction](#1) +- [2. Quick Start](#2) + - [2.1 PaddlePaddle Installation](#2.1) + - [2.2 PaddleClas Installation](#2.2) + - [2.3 Prediction](#2.3) +- [3. Training, Evaluation and Inference](#3) + - [3.1 Installation](#3.1) + - [3.2 Dataset](#3.2) + - [3.2.1 Dataset Introduction](#3.2.1) + - [3.2.2 Getting Dataset](#3.2.2) + - [3.3 Training](#3.3) + - [3.4 Evaluation](#3.4) + - [3.5 Inference](#3.5) +- [4. Model Compression](#4) + - [4.1 SKL-UGI Knowledge Distillation](#4.1) + - [4.1.1 Teacher Model Training](#4.1.1) + - [4.1.2 Knowledge Distillation Training](#4.1.2) +- [5. SHAS](#5) +- [6. Inference Deployment](#6) + - [6.1 Getting Paddle Inference Model](#6.1) + - [6.1.1 Exporting Paddle Inference Model](#6.1.1) + - [6.1.2 Downloading Inference Model](#6.1.2) + - [6.2 Prediction with Python](#6.2) + - [6.2.1 Image Prediction](#6.2.1) + - [6.2.2 Images Prediction](#6.2.2) + - [6.3 Deployment with C++](#6.3) + - [6.4 Deployment as Service](#6.4) + - [6.5 Deployment on Mobile](#6.5) + - [6.6 Converting To ONNX and Deployment](#6.6) + + + +## 1. Introduction + +This case provides a way for users to quickly build a lightweight, high-precision and practical classification model of wheather wearing safety helmet using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in construction scenes, factory workshop scenes, traffic scenes and so on. + +The following table lists the relevant indicators of the model. The first three lines means that using SwinTransformer_tiny, Res2Net200_vd_26w_4s and MobileNetV3_small_x0_35 as the backbone to training. The fourth to seventh lines means that the backbone is replaced by PPLCNet, additional use of EDA strategy and additional use of EDA strategy and SKL-UGI knowledge distillation strategy. 
+ +| Backbone | Tpr(%) | Latency(ms) | Size(M)| Training Strategy | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 93.57 | 91.32 | 111 | using ImageNet pretrained model | +| Res2Net200_vd_26w_4s | 98.92 | 80.99 | 284 | using ImageNet pretrained model | +| MobileNetV3_small_x0_35 | 84.83 | 2.85 | 2.6 | using ImageNet pretrained model | +| PPLCNet_x1_0 | 93.27 | 2.03 | 7.1 | using ImageNet pretrained model | +| PPLCNet_x1_0 | 98.16 | 2.03 | 7.1 | using SSLD pretrained model | +| PPLCNet_x1_0 | 99.30 | 2.03 | 7.1 | using SSLD pretrained model + EDA strategy | +| PPLCNet_x1_0 | 99.38 | 2.03 | 7.1 | using SSLD pretrained model + EDA strategy + SKL-UGI knowledge distillation strategy| + +It can be seen that high Tpr can be getted when backbone is Res2Net200_vd_26w_4s, but the speed is slow. Replacing backbone with the lightweight model MobileNetV3_small_x0_35, the speed can be greatly improved, but the Tpr will be greatly reduced. Replacing backbone with faster backbone PPLCNet_x1_0, the Tpr is higher more 8.5 percentage points than MobileNetv3_small_x0_35. At the same time, the speed can be more than 20% faster. After additional using the SSLD pretrained model, the Tpr can be improved by about 4.9 percentage points without affecting the inference speed. Further, additional using the EDA strategy, the Tpr can be increased by 1.1 percentage points. Finally, after additional using the UDML knowledge distillation, the Tpr can be further improved by 2.2 percentage points. At this point, the Tpr is higher than that of Res2Net200_vd_26w_4s, but the speed is more than 70 times faster. The training method and deployment instructions of PULC will be introduced in detail below. + +**Note**: + +* About `Tpr` metric, please refer to [3.2 section](#3.2) for more information . +* The Latency is tested on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz. The MKLDNN is enabled and the number of threads is 10. +* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and [PP-LCNet Paper](https://arxiv.org/abs/2109.15099). + + + +## 2. Quick Start + + + +### 2.1 PaddlePaddle Installation + +- Run the following command to install if CUDA9 or CUDA10 is available. + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- Run the following command to install if GPU device is unavailable. + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more information about installation, for examples other versions. + + + +### 2.2 PaddleClas wheel Installation + +The command of PaddleClas installation as bellow: + +```bash +pip3 install paddleclas +``` + + + +### 2.3 Prediction + +First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip to get the test demo images. + +* Prediction with CLI + +```bash +paddleclas --model_name=safety_helmet --infer_imgs=pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png +``` + +Results: + +``` +>>> result +class_ids: [1], scores: [0.9986255], label_names: ['unwearing_helmet'], filename: pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png +Predict complete! +``` +**Note**: If you want to test other images, only need to specify the `--infer_imgs` argument, and the directory containing images is also supported. 
+ +* Prediction in Python + +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="safety_helmet") +result = model.predict(input_data="pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png") +print(next(result)) +``` + +**Note**: The `result` returned by `model.predict()` is a generator, so you need to use the `next()` function to call it or `for` loop to loop it. And it will predict with `batch_size` size batch and return the prediction results when called. The default `batch_size` is 1, and you also specify the `batch_size` when instantiating, such as `model = paddleclas.PaddleClas(model_name="safety_helmet", batch_size=2)`. The result of demo above: + +``` +>>> result +[{'class_ids': [1], 'scores': [0.9986255], 'label_names': ['unwearing_helmet'], 'filename': 'pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png'}] +``` + + + +## 3. Training, Evaluation and Inference + + + +### 3.1 Installation + +Please refer to [Installation](../installation/install_paddleclas_en.md) to get the description about installation. + + + +### 3.2 Dataset + + + +#### 3.2.1 Dataset Introduction + +All datasets used in this case are open source data. Train data is the subset of [Safety-Helmet-Wearing-Dataset](https://github.com/njvisionpower/Safety-Helmet-Wearing-Dataset), [hard-hat-detection](https://www.kaggle.com/datasets/andrewmvd/hard-hat-detection) and [Large-scale CelebFaces Attributes (CelebA) Dataset](https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html). + + + +#### 3.2.2 Getting Dataset + +The data used in this case can be getted by processing the open source data. The detailed processes are as follows: + +* `Safety-Helmet-Wearing-Dataset`: according to the bbox label data, the image is cropped by enlarging width and height of bbox by 3 times. The label is 0 if wearing safety helmet in the image, and the label is 1 if not; +* `hard-hat-detection`: Only use the image that labeled "hat" and crop it with bbox. The label is 0; +* `CelebA`: Only use the image labeled "wearing_hat" and crop it with bbox. The label is 0; + +After processing, the dataset totals about 150000 images, of which the number of images with and without wearing safety helmet is about 28000 and 121000 respectively. Then 5600 images are randomly selected in the two labels as the valuation data, a total of about 11200 images, and about 138000 other images as the training data. + +Some image of the processed dataset is as follows: + +![](../../images/PULC/docs/safety_helmet_data_demo.jpg) + +And you can also download the data processed directly. + +``` +cd path_to_PaddleClas +``` + +Enter the `dataset/` directory, download and unzip the dataset. + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/safety_helmet.tar +tar -xf safety_helmet.tar +cd ../ +``` + +The datas under `safety_helmet` directory: + +``` +├── images +│   ├── VOC2028_part2_001209_1.jpg +│   ├── HHD_hard_hat_workers23_1.jpg +│   ├── CelebA_077809.jpg +│   ├── ... +│   └── ... +├── train_list.txt +└── val_list.txt +``` + +The `train_list.txt` and `val_list.txt` are label files of training data and validation data respectively. All images in `images/` directory. + +**Note**: + +* About the contents format of `train_list.txt` and `val_list.txt`, please refer to [Description about Classification Dataset in PaddleClas](../data_preparation/classification_dataset_en.md). + + + +### 3.3 Training + +The details of training config in `ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml`. 
The command about training as follows: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml +``` + +The best metric of validation data is between `0.985` and `0.993`. There would be fluctuations because the data size is small. + +**Note**: + +* The metric Tpr, that describe the True Positive Rate when False Positive Rate is less than a certain threshold(1/10000 used in this case), is one of the commonly used metric for binary classification. About the details of Fpr and Tpr, please refer [here](https://en.wikipedia.org/wiki/Receiver_operating_characteristic). +* When evaluation, the best metric TprAtFpr will be printed that include `Fpr`, `Tpr` and the current `threshold`. The `Tpr` means the Recall rate under the current `Fpr`. The `Tpr` higher, the model better. The `threshold` would be used in deployment, which means the classification threshold under best `Fpr` metric. + + + +### 3.4 Evaluation + +After training, you can use the following commands to evaluate the model. + + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +Among the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specify the path of the best model weight file. You can specify other path if needed. + + + +### 3.5 Inference + +After training, you can use the model that trained to infer. Command is as follow: + +```python +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +The results: + +``` +[{'class_ids': [1], 'scores': [0.9524797], 'label_names': ['unwearing_helmet'], 'file_name': 'deploy/images/PULC/safety_helmet/safety_helmet_test_1.png'}] +``` + +**备注:** + +* Among the above command, argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specify the path of the best model weight file. You can specify other path if needed. +* The default test image is `deploy/images/PULC/safety_helmet/safety_helmet_test_1.png`. And you can test other image, only need to specify the argument `-o Infer.infer_imgs=path_to_test_image`. +* The default threshold is `0.5`. If needed, you can specify the argument `Infer.PostProcess.threshold`, such as: `-o Infer.PostProcess.threshold=0.9167`. And the argument `threshold` is needed to be specified according by specific case. The `0.9167` is the best threshold when `Fpr` is less than `1/10000` in this valuation dataset. + + + +## 4. Model Compression + + + +### 4.1 UDML Knowledge Distillation + +UDML is a simple but effective knowledge distillation algrithem proposed by PaddleClas. Please refer to [UDML 知识蒸馏](../advanced_tutorials/knowledge_distillation_en.md#1.2.3) for more details. + + + +#### 4.1.1 Knowledge Distillation Training + +Training with hyperparameters specified in `ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml`. The command is as follow: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml +``` + +The best metric is between `0.990` and `0.993`. The best student model weight would be saved in file `output/DistillationModel/best_model_student.pdparams`. + + + +## 5. 
Hyperparameters Searching + +The hyperparameters used by [3.2 section](#3.2) and [4.1 section](#4.1) are according by `Hyperparameters Searching` in PaddleClas. If you want to get better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to get better hyperparameters. + +**Note**: This section is optional. Because the search process will take a long time, you can selectively run according to your specific. If not replace the dataset, you can ignore this section. + + + +## 6. Inference Deployment + + + +### 6.1 Getting Paddle Inference Model + +Paddle Inference is the original Inference Library of the PaddlePaddle, provides high-performance inference for server deployment. And compared with directly based on the pretrained model, Paddle Inference can use tools to accelerate prediction, so as to achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information. + +Paddle Inference need Paddle Inference Model to predict. Two process provided to get Paddle Inference Model. If want to use the provided by PaddleClas, you can download directly, click [Downloading Inference Model](#6.1.2). + + + +### 6.1.1 Exporting Paddle Inference Model + +The command about exporting Paddle Inference Model is as follow: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_safety_helmet_infer +``` + +After running above command, the inference model files would be saved in `deploy/models/PPLCNet_x1_0_safety_helmet_infer`, as shown below: + +``` +├── PPLCNet_x1_0_safety_helmet_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**Note**: The best model is from knowledge distillation training. If knowledge distillation training is not used, the best model would be saved in `output/PPLCNet_x1_0/best_model.pdparams`. + + + +### 6.1.2 Downloading Inference Model + +You can also download directly. + +``` +cd deploy/models +# download the inference model and decompression +wget https://paddleclas.bj.bcebos.com/models/PULC/safety_helmet_infer.tar && tar -xf safety_helmet_infer.tar +``` + +After decompression, the directory `models` should be shown below. + +``` +├── safety_helmet_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 Prediction with Python + + + +#### 6.2.1 Image Prediction + +Return the directory `deploy`: + +``` +cd ../ +``` + +Run the following command to classify whether wearing safety helmet about the image `./images/PULC/safety_helmet/safety_helmet_test_1.png`. + +```shell +# Use the following command to predict with GPU. +python3.7 python/predict_cls.py -c configs/PULC/safety_helmet/inference_safety_helmet.yaml +# Use the following command to predict with CPU. +python3.7 python/predict_cls.py -c configs/PULC/safety_helmet/inference_safety_helmet.yaml -o Global.use_gpu=False +``` + +The prediction results: + +``` +safety_helmet_test_1.png: class id(s): [1], score(s): [1.00], label_name(s): ['unwearing_helmet'] +``` + +**Note**: The default threshold is `0.5`. If needed, you can specify the argument `Infer.PostProcess.threshold`, such as: `-o Infer.PostProcess.threshold=0.9167`. 
And the argument `threshold` is needed to be specified according by specific case. The `0.9167` is the best threshold when `Fpr` is less than `1/10000` in this valuation dataset. Please refer to [3.3 section](#3.3) for details. + + + +#### 6.2.2 Images Prediction + +If you want to predict images in directory, please specify the argument `Global.infer_imgs` as directory path by `-o Global.infer_imgs`. The command is as follow. + +```shell +# Use the following command to predict with GPU. If want to replace with CPU, you can add argument -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/safety_helmet/inference_safety_helmet.yaml -o Global.infer_imgs="./images/PULC/safety_helmet/" +``` + +All prediction results will be printed, as shown below. + +``` +safety_helmet_test_1.png: class id(s): [1], score(s): [1.00], label_name(s): ['unwearing_helmet'] +safety_helmet_test_2.png: class id(s): [0], score(s): [1.00], label_name(s): ['wearing_helmet'] +``` + +Among the prediction results above, `wearing_helmet` means that wearing safety helmet about the image, `unwearing_helmet` means not. + + + +### 6.3 Deployment with C++ + +PaddleClas provides an example about how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md). + + + +### 6.4 Deployment as Service + +Paddle Serving is a flexible, high-performance carrier for machine learning models, and supports different protocol, such as RESTful, gRPC, bRPC and so on, which provides different deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information. + +PaddleClas provides an example about how to deploy as service by Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md). + + + +### 6.5 Deployment on Mobile + +Paddle-Lite is an open source deep learning framework that designed to make easy to perform inference on mobile, embeded, and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information. + +PaddleClas provides an example of how to deploy on mobile by Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md). + + + +### 6.6 Converting To ONNX and Deployment + +Paddle2ONNX support convert Paddle Inference model to ONNX model. And you can deploy with ONNX model on different inference engine, such as TensorRT, OpenVINO, MNN/TNN, NCNN and so on. About Paddle2ONNX details, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX). + +PaddleClas provides an example of how to convert Paddle Inference model to ONNX model by paddle2onnx toolkit and predict by ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details. diff --git a/docs/en/PULC/PULC_text_image_orientation_en.md b/docs/en/PULC/PULC_text_image_orientation_en.md new file mode 100644 index 0000000000000000000000000000000000000000..1d3cc41f992adff90f396463205cd060147023c1 --- /dev/null +++ b/docs/en/PULC/PULC_text_image_orientation_en.md @@ -0,0 +1,466 @@ +# PULC Classification Model of Text Image Orientation + +## Catalogue + +- [1. Introduction](#1) +- [2. Quick Start](#2) + - [2.1 PaddlePaddle Installation](#2.1) + - [2.2 PaddleClas Installation](#2.2) + - [2.3 Prediction](#2.3) +- [3. 
Training, Evaluation and Inference](#3) + - [3.1 Installation](#3.1) + - [3.2 Dataset](#3.2) + - [3.2.1 Dataset Introduction](#3.2.1) + - [3.2.2 Getting Dataset](#3.2.2) + - [3.3 Training](#3.3) + - [3.4 Evaluation](#3.4) + - [3.5 Inference](#3.5) +- [4. Model Compression](#4) + - [4.1 SKL-UGI Knowledge Distillation](#4.1) + - [4.1.1 Teacher Model Training](#4.1.1) + - [4.1.2 Knowledge Distillation Training](#4.1.2) +- [5. SHAS](#5) +- [6. Inference Deployment](#6) + - [6.1 Getting Paddle Inference Model](#6.1) + - [6.1.1 Exporting Paddle Inference Model](#6.1.1) + - [6.1.2 Downloading Inference Model](#6.1.2) + - [6.2 Prediction with Python](#6.2) + - [6.2.1 Image Prediction](#6.2.1) + - [6.2.2 Images Prediction](#6.2.2) + - [6.3 Deployment with C++](#6.3) + - [6.4 Deployment as Service](#6.4) + - [6.5 Deployment on Mobile](#6.5) + - [6.6 Converting To ONNX and Deployment](#6.6) + + + +## 1. Introduction + +In the process of document scanning, license shooting and so on, sometimes in order to shoot more clearly, the camera device will be rotated, resulting in photo in different directions. At this time, the standard OCR process cannot cope with these issues well. Using the text image orientation classification technology, the direction of the text image can be predicted and adjusted, so as to improve the accuracy of OCR processing. This case provides a way for users to use PaddleClas PULC (Practical Ultra Lightweight image Classification) to quickly build a lightweight, high-precision, practical classification model of text image orientation. This model can be widely used in OCR processing scenarios of rotating pictures in financial, government and other industries. + +The following table lists the relevant indicators of the model. The first two lines means that using SwinTransformer_tiny and MobileNetV3_small_x0_35 as the backbone to training. The third to fifth lines means that the backbone is replaced by PPLCNet, additional use of SSLD pretrained model and additional use of hyperparameters searching strategy. + +| Backbone | Top1-Acc(%) | Latency(ms) | Size(M)| Training Strategy | +| ----------------------- | --------- | ---------- | --------- | ------------------------------------- | +| SwinTranformer_tiny | 99.12 | 89.65 | 111 | using ImageNet pretrained model | +| MobileNetV3_small_x0_35 | 83.61 | 2.95 | 2.6 | using ImageNet pretrained model | +| PPLCNet_x1_0 | 97.85 | 2.16 | 7.1 | using ImageNet pretrained model | +| PPLCNet_x1_0 | 98.02 | 2.16 | 7.1 | using SSLD pretrained model | +| **PPLCNet_x1_0** | **99.06** | **2.16** | **7.1** | using SSLD pretrained model + hyperparameters searching strategy | + +It can be seen that high accuracy can be getted when backbone is SwinTranformer_tiny, but the speed is slow. Replacing backbone with the lightweight model MobileNetV3_small_x0_35, the speed can be greatly improved, but the accuracy will be greatly reduced. Replacing backbone with faster backbone PPLCNet_x1_0, the accuracy is higher more 14 percentage points than MobileNetv3_small_x0_35. At the same time, the speed can be more faster. After additional using the SSLD pretrained model, the accuracy can be improved by about 0.17 percentage points without affecting the inference speed. Finally, after additional using the hyperparameters searching strategy, the accuracy can be further improved by 1.04 percentage points. At this point, the accuracy is close to that of SwinTranformer_tiny, but the speed is more faster. 
The training method and deployment instructions of PULC will be introduced in detail below. + +**Note**: + +* The Latency is tested on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz. The MKLDNN is enabled and the number of threads is 10. +* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and [PP-LCNet Paper](https://arxiv.org/abs/2109.15099). + + + +## 2. Quick Start + + + +### 2.1 PaddlePaddle Installation + +- Run the following command to install if CUDA9 or CUDA10 is available. + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- Run the following command to install if GPU device is unavailable. + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more information about installation, for examples other versions. + + + +### 2.2 PaddleClas wheel Installation + +The command of PaddleClas installation as bellow: + +```bash +pip3 install paddleclas +``` + + + +### 2.3 Prediction + +First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip to get the test demo images. + +* Prediction with CLI + +```bash +paddleclas --model_name=text_image_orientation --infer_imgs=pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg +``` + +Results: + +``` +>>> result +class_ids: [0, 2], scores: [0.85615, 0.05046], label_names: ['0', '180'], filename: pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg +Predict complete! +``` + +**Note**: If you want to test other images, only need to specify the `--infer_imgs` argument, and the directory containing images is also supported. + +* Prediction in Python + +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="text_image_orientation") +result = model.predict(input_data="pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg") +print(next(result)) +``` + +**Note**: The `result` returned by `model.predict()` is a generator, so you need to use the `next()` function to call it or `for` loop to loop it. And it will predict with `batch_size` size batch and return the prediction results when called. The default `batch_size` is 1, and you also specify the `batch_size` when instantiating, such as `model = paddleclas.PaddleClas(model_name="text_image_orientation", batch_size=2)`. The result of demo above: + +``` +>>> result +[{'class_ids': [0, 2], 'scores': [0.85615, 0.05046], 'label_names': ['0', '180'], 'filename': 'pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg'}] +``` + + + +## 3. Training, Evaluation and Inference + + + +### 3.1 Installation + +Please refer to [Installation](../installation/install_paddleclas_en.md) to get the description about installation. + + + +### 3.2 Dataset + + + +#### 3.2.1 Dataset Introduction + +The model provided in [1 section](#1) is trained using internal data, which has not been open source. So we provide a dataset with [ICDAR2019-ArT](https://ai.baidu.com/broad/introduction?dataset=art), [XFUND](https://github.com/doc-analysis/XFUND) and [ICDAR2015](https://rrc.cvc.uab.es/?ch=4&com=introduction) to experience. + +![](../../images/PULC/docs/text_image_orientation_original_data.png) + + + +#### 3.2.2 Getting Dataset + +The data used in this case can be getted by processing the open source data. 
+
+Some images from the processed dataset are shown below:
+
+![](../../images/PULC/docs/text_image_orientation_data_demo.png)
+
+You can also download the processed data directly.
+
+```
+cd path_to_PaddleClas
+```
+
+Enter the `dataset/` directory, then download and unzip the dataset.
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/PULC/text_image_orientation.tar
+tar -xf text_image_orientation.tar
+cd ../
+```
+
+The data under the `text_image_orientation` directory:
+
+```
+├── img_0
+│   ├── img_rot0_0.jpg
+│   ├── img_rot0_1.png
+...
+├── img_90
+│   ├── img_rot90_0.jpg
+│   ├── img_rot90_1.png
+...
+├── img_180
+│   ├── img_rot180_0.jpg
+│   ├── img_rot180_1.png
+...
+├── img_270
+│   ├── img_rot270_0.jpg
+│   ├── img_rot270_1.png
+...
+├── distill_data
+│   ├── gt_7060_0.jpg
+│   ├── gt_7060_90.jpg
+...
+├── train_list.txt
+├── train_list.txt.debug
+├── train_list_for_distill.txt
+├── test_list.txt
+├── test_list.txt.debug
+└── label_list.txt
+```
+
+Where `img_0/`, `img_90/`, `img_180/` and `img_270/` hold the data of the 4 angles respectively. The `train_list.txt` and `test_list.txt` are the label files of the training data and validation data respectively. The files `train_list.txt.debug` and `test_list.txt.debug` are small subsets of `train_list.txt` and `test_list.txt` for quick trials. `distill_data/` is the supplementary data used for SKL-UGI knowledge distillation, and its label file is `train_list_for_distill.txt`.
+
+**Note**:
+
+* For the content format of the label files, please refer to [Description about Classification Dataset in PaddleClas](../data_preparation/classification_dataset_en.md).
+* About `train_list_for_distill.txt`, please refer to [Knowledge Distillation Label](../advanced_tutorials/distillation/distillation_en.md).
+
+
+
+### 3.3 Training
+
+The training configuration is detailed in `ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml`. Start training with the following command:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml
+```
+
+The best metric on the validation data is about `0.99`.
+
+**Note**:
+* The metrics mentioned in this document were obtained by training on a large-scale internal dataset. They cannot be reached with the demo data alone, because the demo dataset is small and its distribution differs from the internal data. You can further expand your own data and use the optimization method described in this case to achieve higher accuracy.
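+
+For a quick dry run before committing to a full training job, individual config fields can be overridden from the command line with `-o`. The sketch below is illustrative; the key paths are assumed to follow the structure of the YAML config above, and it trains for a few epochs on the small `train_list.txt.debug` subset shipped with the dataset:
+
+```shell
+# Single-process trial run on the debug subset (illustrative key paths).
+python3 tools/train.py \
+    -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \
+    -o Global.epochs=5 \
+    -o DataLoader.Train.dataset.cls_label_path=./dataset/text_image_orientation/train_list.txt.debug
+```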
+
+
+
+### 3.4 Evaluation
+
+After training, you can use the following command to evaluate the model.
+
+```bash
+python3 tools/eval.py \
+    -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model"
+```
+
+In the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file. You can specify another path if needed.
+
+
+
+### 3.5 Inference
+
+After training, you can use the trained model for inference. The command is as follows:
+
+```bash
+python3 tools/infer.py \
+    -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model"
+```
+
+The results:
+
+```
+[{'class_ids': [0, 2], 'scores': [0.85615, 0.05046], 'file_name': 'deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg', 'label_names': ['0', '180']}]
+```
+
+**Note**:
+
+* In the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file. You can specify another path if needed.
+* The default test image is `deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg`. To test another image, you only need to specify the argument `-o Infer.infer_imgs=path_to_test_image`.
+* The Top-2 results are printed: `0` means the text direction of the image is 0 degrees, `90` means it is rotated 90 degrees clockwise, `180` means 180 degrees clockwise, and `270` means 270 degrees clockwise.
+
+
+
+## 4. Model Compression
+
+
+
+### 4.1 SKL-UGI Knowledge Distillation
+
+SKL-UGI is a simple yet effective knowledge distillation algorithm proposed by PaddleClas.
+
+
+
+
+
+
+#### 4.1.1 Teacher Model Training
+
+Train the teacher model with the hyperparameters specified in `ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd
+```
+
+The best metric on the validation data is about `0.996`. The best teacher model weights are saved in `output/ResNet101_vd/best_model.pdparams`.
+
+**Note**: Training ResNet101_vd requires more GPU memory, so you can reduce `batch_size` and `learning_rate` at the same time, such as: `-o DataLoader.Train.sampler.batch_size=64 -o Optimizer.lr.learning_rate=0.1`.
+
+
+
+#### 4.1.2 Knowledge Distillation Training
+
+The training strategy is specified in the config file `ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml`, where the teacher model is `ResNet101_vd` and the student model is `PPLCNet_x1_0`.
+
+The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+The best metric is about `0.99`. The best student model weights are saved in `output/DistillationModel/best_model_student.pdparams`.
+
+
+
+## 5. Hyperparameters Searching
+
+The hyperparameters used in [Section 3.2](#3.2) and [Section 4.1](#4.1) were obtained with the `Hyperparameters Searching` strategy in PaddleClas. If you want to get better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to obtain better hyperparameters.
+
+**Note**: This section is optional.
Because the search process takes a long time, you can run it selectively according to your own needs. If you do not replace the dataset, you can skip this section.
+
+
+
+## 6. Inference Deployment
+
+
+
+### 6.1 Getting Paddle Inference Model
+
+Paddle Inference is the native inference library of PaddlePaddle, which provides high-performance inference for server deployment. Compared with predicting directly from the trained model, Paddle Inference can use tools to accelerate prediction and achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information.
+
+Paddle Inference needs a Paddle Inference Model for prediction. There are two ways to get one. If you want to use the model provided by PaddleClas, you can download it directly; see [Downloading Inference Model](#6.1.2).
+
+
+
+### 6.1.1 Exporting Paddle Inference Model
+
+The command for exporting the Paddle Inference Model is as follows:
+
+```bash
+python3 tools/export_model.py \
+    -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/DistillationModel/best_model_student \
+    -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_text_image_orientation_infer
+```
+
+After running the above command, the inference model files are saved in `deploy/models/PPLCNet_x1_0_text_image_orientation_infer`, as shown below:
+
+```
+├── PPLCNet_x1_0_text_image_orientation_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+**Note**: The best model here comes from knowledge distillation training. If knowledge distillation training is not used, the best model is saved in `output/PPLCNet_x1_0/best_model.pdparams`.
+
+
+
+### 6.1.2 Downloading Inference Model
+
+You can also download the inference model directly.
+
+```
+cd deploy/models
+# download the inference model and decompress it
+wget https://paddleclas.bj.bcebos.com/models/PULC/text_image_orientation_infer.tar && tar -xf text_image_orientation_infer.tar
+```
+
+After decompression, the `models` directory should look like this:
+
+```
+├── text_image_orientation_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+
+
+### 6.2 Prediction with Python
+
+
+
+#### 6.2.1 Image Prediction
+
+Return to the `deploy` directory:
+
+```
+cd ../
+```
+
+Run the following command to classify the text image orientation of the image `./images/PULC/text_image_orientation/img_rot0_demo.jpg`.
+
+```shell
+# Use the following command to predict with GPU.
+python3.7 python/predict_cls.py -c configs/PULC/text_image_orientation/inference_text_image_orientation.yaml
+# Use the following command to predict with CPU.
+python3.7 python/predict_cls.py -c configs/PULC/text_image_orientation/inference_text_image_orientation.yaml -o Global.use_gpu=False
+```
+
+The prediction results:
+
+```
+img_rot0_demo.jpg: class id(s): [0, 2], score(s): [0.86, 0.05], label_name(s): ['0', '180']
+```
+
+Among the results, `0` means the text direction of the image is 0 degrees, `90` means it is rotated 90 degrees clockwise, `180` means 180 degrees clockwise, and `270` means 270 degrees clockwise.
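+
+As a hedged aside, the latency-test setting mentioned in Section 1 (CPU, MKLDNN enabled, 10 threads) can presumably be reproduced by overriding the corresponding fields of the inference config; the key names below are assumed from the deploy configs, so verify them against the YAML file:
+
+```shell
+# CPU prediction with MKLDNN and 10 threads (assumed key names).
+python3.7 python/predict_cls.py \
+    -c configs/PULC/text_image_orientation/inference_text_image_orientation.yaml \
+    -o Global.use_gpu=False \
+    -o Global.enable_mkldnn=True \
+    -o Global.cpu_num_threads=10
+```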
+
+
+#### 6.2.2 Images Prediction
+
+If you want to predict the images in a directory, please specify the argument `Global.infer_imgs` as the directory path with `-o Global.infer_imgs`. The command is as follows.
+
+```shell
+# Use the following command to predict with GPU. If you want to use CPU instead, add the argument -o Global.use_gpu=False
+python3.7 python/predict_cls.py -c configs/PULC/text_image_orientation/inference_text_image_orientation.yaml -o Global.infer_imgs="./images/PULC/text_image_orientation/"
+```
+
+All prediction results will be printed, as shown below.
+
+```
+img_rot0_demo.jpg: class id(s): [0, 2], score(s): [0.86, 0.05], label_name(s): ['0', '180']
+img_rot180_demo.jpg: class id(s): [2, 1], score(s): [0.88, 0.04], label_name(s): ['180', '90']
+```
+
+
+
+### 6.3 Deployment with C++
+
+PaddleClas provides an example of how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md).
+
+
+
+### 6.4 Deployment as Service
+
+Paddle Serving is a flexible, high-performance carrier for machine learning models. It supports multiple protocols, such as RESTful, gRPC and bRPC, and provides deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer to [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information.
+
+PaddleClas provides an example of how to deploy as a service with Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md).
+
+
+
+### 6.5 Deployment on Mobile
+
+Paddle-Lite is an open-source deep learning framework designed to make it easy to perform inference on mobile, embedded and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information.
+
+PaddleClas provides an example of how to deploy on mobile with Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md).
+
+
+
+### 6.6 Converting To ONNX and Deployment
+
+Paddle2ONNX supports converting a Paddle Inference model to the ONNX format, after which you can deploy the ONNX model with different inference engines, such as TensorRT, OpenVINO, MNN/TNN and NCNN. For details about Paddle2ONNX, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX).
+
+PaddleClas provides an example of how to convert a Paddle Inference model to an ONNX model with the paddle2onnx toolkit and predict with the ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details.
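+
+For orientation, a minimal conversion sketch is shown below, assuming paddle2onnx is installed (for example via `pip3 install paddle2onnx`) and using the inference model directory exported in [6.1.1](#6.1.1); treat the options as a sketch and see the linked readme for the authoritative usage:
+
+```shell
+paddle2onnx --model_dir=./deploy/models/PPLCNet_x1_0_text_image_orientation_infer \
+    --model_filename=inference.pdmodel \
+    --params_filename=inference.pdiparams \
+    --save_file=./deploy/models/text_image_orientation.onnx \
+    --opset_version=10 \
+    --enable_onnx_checker=True
+```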
diff --git a/docs/en/PULC/PULC_textline_orientation_en.md b/docs/en/PULC/PULC_textline_orientation_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..d11307d0b5aafe056c1f1e53a85882d2449ac277
--- /dev/null
+++ b/docs/en/PULC/PULC_textline_orientation_en.md
@@ -0,0 +1,450 @@
+# PULC Classification Model of Textline Orientation
+
+------
+
+## Catalogue
+
+- [1. Introduction](#1)
+- [2. Quick Start](#2)
+  - [2.1 PaddlePaddle Installation](#2.1)
+  - [2.2 PaddleClas Installation](#2.2)
+  - [2.3 Prediction](#2.3)
+- [3. Training, Evaluation and Inference](#3)
+  - [3.1 Installation](#3.1)
+  - [3.2 Dataset](#3.2)
+    - [3.2.1 Dataset Introduction](#3.2.1)
+    - [3.2.2 Getting Dataset](#3.2.2)
+  - [3.3 Training](#3.3)
+  - [3.4 Evaluation](#3.4)
+  - [3.5 Inference](#3.5)
+- [4. Model Compression](#4)
+  - [4.1 SKL-UGI Knowledge Distillation](#4.1)
+    - [4.1.1 Teacher Model Training](#4.1.1)
+    - [4.1.2 Knowledge Distillation Training](#4.1.2)
+- [5. Hyperparameters Searching](#5)
+- [6. Inference Deployment](#6)
+  - [6.1 Getting Paddle Inference Model](#6.1)
+    - [6.1.1 Exporting Paddle Inference Model](#6.1.1)
+    - [6.1.2 Downloading Inference Model](#6.1.2)
+  - [6.2 Prediction with Python](#6.2)
+    - [6.2.1 Image Prediction](#6.2.1)
+    - [6.2.2 Images Prediction](#6.2.2)
+  - [6.3 Deployment with C++](#6.3)
+  - [6.4 Deployment as Service](#6.4)
+  - [6.5 Deployment on Mobile](#6.5)
+  - [6.6 Converting To ONNX and Deployment](#6.6)
+
+
+
+## 1. Introduction
+
+This case provides a way for users to quickly build a lightweight, high-precision and practical textline orientation classification model using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in character correction, character recognition and similar scenarios.
+
+The following table lists the relevant metrics of the models. The first two rows show the results of using SwinTransformer_tiny and MobileNetV3_small_x0_35 as the backbone. The third to seventh rows show the results of replacing the backbone with PPLCNet, changing the input resolution and stride, using the SSLD pretrained model, additionally using the EDA strategy, and finally additionally using the SKL-UGI knowledge distillation strategy.
+
+| Backbone | Top-1 Acc(%) | Latency(ms) | Size(M)| Training Strategy |
+|-------|-----------|----------|---------------|---------------|
+| SwinTransformer_tiny | 93.61 | 89.64 | 111 | using ImageNet pretrained model |
+| MobileNetV3_small_x0_35 | 81.40 | 2.96 | 2.6 | using ImageNet pretrained model |
+| PPLCNet_x1_0 | 89.99 | 2.11 | 7.0 | using ImageNet pretrained model |
+| PPLCNet_x1_0* | 94.06 | 2.68 | 7.0 | using ImageNet pretrained model |
+| PPLCNet_x1_0* | 94.11 | 2.68 | 7.0 | using SSLD pretrained model |
+| PPLCNet_x1_0** | 96.01 | 2.72 | 7.0 | using SSLD pretrained model + EDA strategy |
+| PPLCNet_x1_0** | 95.86 | 2.72 | 7.0 | using SSLD pretrained model + EDA strategy + SKL-UGI knowledge distillation strategy|
+
+As the table shows, SwinTransformer_tiny reaches high accuracy, but its inference is slow. Replacing the backbone with the lightweight MobileNetV3_small_x0_35 greatly improves the speed, but the accuracy drops sharply. Replacing the backbone with the faster PPLCNet_x1_0 makes the accuracy about 8.6 percentage points higher than MobileNetV3_small_x0_35, with more than 10% faster inference as well. On this basis, changing the resolution and stride (refer to [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) makes inference about 27% slower but improves the accuracy by about 4.1 percentage points. Using the SSLD pretrained model then improves the accuracy by about 0.05 percentage points without affecting the inference speed, and additionally using the EDA strategy increases the accuracy by another 1.9 percentage points. The training method and deployment instructions of PULC will be introduced in detail below.
+
+**Note**:
+* A backbone name without \* means the resolution is 224x224, and with \* means the resolution is 48x192 (h\*w). The stride of the network is changed to `[2, [2, 1], [2, 1], [2, 1]]`. Please refer to [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR) for more details.
+* A backbone name with \*\* means that the resolution is 80x160 (h\*w), and the stride of the network is changed to `[2, [2, 1], [2, 1], [2, 1]]`. This resolution is found by [Hyperparameters Searching](PULC_train_en.md#4).
+* The Latency is tested on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz. MKLDNN is enabled and the number of threads is 10.
+* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and [PP-LCNet Paper](https://arxiv.org/abs/2109.15099).
+
+
+
+## 2. Quick Start
+
+
+
+### 2.1 PaddlePaddle Installation
+
+- Run the following command to install if CUDA9 or CUDA10 is available.
+
+```bash
+python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
+```
+
+- Run the following command to install if GPU device is unavailable.
+
+```bash
+python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+```
+
+Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more installation information, for example, for other versions.
+
+
+
+### 2.2 PaddleClas wheel Installation
+
+Install PaddleClas with the following command:
+
+```bash
+pip3 install paddleclas
+```
+
+
+
+### 2.3 Prediction
+
+First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip the test demo images.
+
+* Prediction with CLI
+
+```bash
+paddleclas --model_name=textline_orientation --infer_imgs=pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png
+```
+
+Results:
+
+```
+>>> result
+class_ids: [0], scores: [1.0], label_names: ['0_degree'], filename: pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png
+Predict complete!
+```
+
+**Note**: If you want to test other images, you only need to change the `--infer_imgs` argument; a directory containing images is also supported.
+
+* Prediction in Python
+
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="textline_orientation")
+result = model.predict(input_data="pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png")
+print(next(result))
+```
+
+**Note**: The `result` returned by `model.predict()` is a generator, so you need to call it with `next()` or iterate over it with a `for` loop. Each call runs prediction on a batch of `batch_size` images and returns the results. The default `batch_size` is 1, and you can also specify `batch_size` when instantiating, such as `model = paddleclas.PaddleClas(model_name="textline_orientation", batch_size=2)`. The result of the demo above:
+
+```
+>>> result
+[{'class_ids': [0], 'scores': [1.0], 'label_names': ['0_degree'], 'filename': 'pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png'}]
+```
+
+
+
+## 3. Training, Evaluation and Inference
+
+
+
+### 3.1 Installation
+
+Please refer to [Installation](../installation/install_paddleclas_en.md) for the description of installation.
+
+
+
+### 3.2 Dataset
+
+
+
+#### 3.2.1 Dataset Introduction
+
+The data used in this case come from internal data. If you want to experience the training process, you can use open-source data, such as [ICDAR2019-LSVT](https://aistudio.baidu.com/aistudio/datasetdetail/8429).
+
+
+
+#### 3.2.2 Getting Dataset
+
+Take ICDAR2019-LSVT as an example: the images with ID numbers from 0 to 1999 are processed and used. After processing, each image is assigned to class 0 or class 1, where class 0 means the textline rotation angle is 0 degrees and class 1 means 180 degrees (a processing sketch is given after the list below).
+
+- Training data: the images with ID numbers from 0 to 1799 are used as the training set, 3600 images in total.
+- Evaluation data: the images with ID numbers from 1800 to 1999 are used as the evaluation set, 400 images in total.
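+
+The following is a minimal sketch of this processing, under the stated interpretation that every line image is kept as class 0 and its 180-degree rotation is added as class 1 (which matches the counts above: 1800 IDs x 2 = 3600 training images); source paths and file names are illustrative:
+
+```python
+import os
+from PIL import Image
+
+for cls in ("0", "1"):
+    os.makedirs(cls, exist_ok=True)
+
+with open("train_list.txt", "w") as f:
+    for i in range(1800):                        # IDs 0-1799 form the training set
+        img = Image.open(f"lsvt_crops/{i}.jpg")  # illustrative source path
+        img.save(f"0/img_{i}.jpg")               # class 0: original orientation
+        img.rotate(180).save(f"1/img_{i}.jpg")   # class 1: rotated 180 degrees
+        f.write(f"0/img_{i}.jpg 0\n")
+        f.write(f"1/img_{i}.jpg 1\n")
+```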
+
+Some images from the processed dataset are shown below:
+
+![](../../images/PULC/docs/textline_orientation_data_demo.png)
+
+You can also download the processed data directly.
+
+```
+cd path_to_PaddleClas
+```
+
+Enter the `dataset/` directory, then download and unzip the dataset.
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/PULC/textline_orientation.tar
+tar -xf textline_orientation.tar
+cd ../
+```
+
+The data under the `textline_orientation` directory:
+
+```
+├── 0
+│   ├── img_0.jpg
+│   ├── img_1.jpg
+...
+├── 1
+│   ├── img_0.jpg
+│   ├── img_1.jpg
+...
+├── train_list.txt
+└── val_list.txt
+```
+
+Where `0/` and `1/` hold the class 0 and class 1 data respectively. The `train_list.txt` and `val_list.txt` are the label files of the training data and validation data respectively.
+
+**Note**:
+
+* For the content format of `train_list.txt` and `val_list.txt`, please refer to [Description about Classification Dataset in PaddleClas](../data_preparation/classification_dataset_en.md).
+
+
+
+### 3.3 Training
+
+The training configuration is detailed in `ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml`. Start training with the following command:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml
+```
+
+**Note**:
+
+* Since the ICDAR2019-LSVT data processed as above differ from the dataset used to train the provided pretrained model, the resulting metrics will differ from those in Section 1. If you want higher accuracy, you can prepare more data based on [ICDAR2019-LSVT](https://aistudio.baidu.com/aistudio/datasetdetail/8429).
+
+
+
+### 3.4 Evaluation
+
+After training, you can use the following command to evaluate the model.
+
+```bash
+python3 tools/eval.py \
+    -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model"
+```
+
+In the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file. You can specify another path if needed.
+
+
+
+### 3.5 Inference
+
+After training, you can use the trained model for inference. The command is as follows:
+
+```bash
+python3 tools/infer.py \
+    -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/PPLCNet_x1_0/best_model
+```
+
+The results:
+
+```
+[{'class_ids': [0], 'scores': [1.0], 'file_name': 'deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png', 'label_names': ['0_degree']}]
+```
+
+**Note**:
+
+* In the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file. You can specify another path if needed.
+* The default test image is `deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png`. To test another image, you only need to specify the argument `-o Infer.infer_imgs=path_to_test_image`.
+
+
+
+## 4. Model Compression
+
+
+
+### 4.1 SKL-UGI Knowledge Distillation
+
+SKL-UGI is a simple yet effective knowledge distillation algorithm proposed by PaddleClas.
+
+
+
+
+
+
+#### 4.1.1 Teacher Model Training
+
+Train the teacher model with the hyperparameters specified in `ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml`.
The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd
+```
+
+The best metric on the validation data is between `0.96` and `0.98`. The best teacher model weights are saved in `output/ResNet101_vd/best_model.pdparams`.
+
+
+
+#### 4.1.2 Knowledge Distillation Training
+
+The training strategy is specified in the config file `ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml`, where the teacher model is `ResNet101_vd` and the student model is `PPLCNet_x1_0`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+The best metric is between `0.95` and `0.97`. The best student model weights are saved in `output/DistillationModel/best_model_student.pdparams`.
+
+
+
+## 5. Hyperparameters Searching
+
+The hyperparameters used in [Section 3.2](#3.2) and [Section 4.1](#4.1) were obtained with the `Hyperparameters Searching` strategy in PaddleClas. If you want to get better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to obtain better hyperparameters.
+
+**Note**: This section is optional. Because the search process takes a long time, you can run it selectively according to your own needs. If you do not replace the dataset, you can skip this section.
+
+
+
+## 6. Inference Deployment
+
+
+
+### 6.1 Getting Paddle Inference Model
+
+Paddle Inference is the native inference library of PaddlePaddle, which provides high-performance inference for server deployment. Compared with predicting directly from the trained model, Paddle Inference can use tools to accelerate prediction and achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information.
+
+Paddle Inference needs a Paddle Inference Model for prediction. There are two ways to get one. If you want to use the model provided by PaddleClas, you can download it directly; see [Downloading Inference Model](#6.1.2).
+
+
+
+### 6.1.1 Exporting Paddle Inference Model
+
+The command for exporting the Paddle Inference Model is as follows:
+
+```bash
+python3 tools/export_model.py \
+    -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/PPLCNet_x1_0/best_model \
+    -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_textline_orientation_infer
+```
+
+After running the above command, the inference model files are saved in `deploy/models/PPLCNet_x1_0_textline_orientation_infer`, as shown below:
+
+```
+├── PPLCNet_x1_0_textline_orientation_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+**Note**: The best model here comes from knowledge distillation training. If knowledge distillation training is not used, the best model is saved in `output/PPLCNet_x1_0/best_model.pdparams`.
+
+
+
+### 6.1.2 Downloading Inference Model
+
+You can also download the inference model directly.
+
+```
+cd deploy/models
+# download the inference model and decompress it
+wget https://paddleclas.bj.bcebos.com/models/PULC/textline_orientation_infer.tar && tar -xf textline_orientation_infer.tar
+```
+
+After decompression, the `models` directory should look like this:
+
+```
+├── textline_orientation_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+
+
+### 6.2 Prediction with Python
+
+
+
+#### 6.2.1 Image Prediction
+
+Return to the `deploy` directory:
+
+```
+cd ../
+```
+
+Run the following command to classify the rotation of the image `./images/PULC/textline_orientation/textline_orientation_test_0_0.png`.
+
+```shell
+# Use the following command to predict with GPU.
+python3.7 python/predict_cls.py -c configs/PULC/textline_orientation/inference_textline_orientation.yaml
+# Use the following command to predict with CPU.
+python3.7 python/predict_cls.py -c configs/PULC/textline_orientation/inference_textline_orientation.yaml -o Global.use_gpu=False
+```
+
+The prediction results:
+
+```
+textline_orientation_test_0_0.png: class id(s): [0], score(s): [1.00], label_name(s): ['0_degree']
+```
+
+
+
+#### 6.2.2 Images Prediction
+
+If you want to predict the images in a directory, please specify the argument `Global.infer_imgs` as the directory path with `-o Global.infer_imgs`. The command is as follows.
+
+```shell
+# Use the following command to predict with GPU. If you want to use CPU instead, add the argument -o Global.use_gpu=False
+python3.7 python/predict_cls.py -c configs/PULC/textline_orientation/inference_textline_orientation.yaml -o Global.infer_imgs="./images/PULC/textline_orientation/"
+```
+
+All prediction results will be printed, as shown below.
+
+```
+textline_orientation_test_0_0.png: class id(s): [0], score(s): [1.00], label_name(s): ['0_degree']
+textline_orientation_test_0_1.png: class id(s): [0], score(s): [1.00], label_name(s): ['0_degree']
+textline_orientation_test_1_0.png: class id(s): [1], score(s): [1.00], label_name(s): ['180_degree']
+textline_orientation_test_1_1.png: class id(s): [1], score(s): [1.00], label_name(s): ['180_degree']
+```
+
+Among the prediction results above, `0_degree` means the rotation angle of the textline image is 0 degrees, and `180_degree` means it is 180 degrees.
+
+
+
+### 6.3 Deployment with C++
+
+PaddleClas provides an example of how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md).
+
+
+
+### 6.4 Deployment as Service
+
+Paddle Serving is a flexible, high-performance carrier for machine learning models. It supports multiple protocols, such as RESTful, gRPC and bRPC, and provides deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer to [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information.
+
+PaddleClas provides an example of how to deploy as a service with Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md).
+
+
+
+### 6.5 Deployment on Mobile
+
+Paddle-Lite is an open-source deep learning framework designed to make it easy to perform inference on mobile, embedded and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information.
+
+PaddleClas provides an example of how to deploy on mobile with Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md).
+
+
+
+### 6.6 Converting To ONNX and Deployment
+
+Paddle2ONNX supports converting a Paddle Inference model to the ONNX format, after which you can deploy the ONNX model with different inference engines, such as TensorRT, OpenVINO, MNN/TNN and NCNN. For details about Paddle2ONNX, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX).
+
+PaddleClas provides an example of how to convert a Paddle Inference model to an ONNX model with the paddle2onnx toolkit and predict with the ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details.
diff --git a/docs/en/PULC/PULC_traffic_sign_en.md b/docs/en/PULC/PULC_traffic_sign_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..baa0faf4828a6c7acc16f8c12587a2af58c04f99
--- /dev/null
+++ b/docs/en/PULC/PULC_traffic_sign_en.md
@@ -0,0 +1,475 @@
+# PULC Classification Model of Traffic Sign
+
+------
+
+## Catalogue
+
+- [1. Introduction](#1)
+- [2. Quick Start](#2)
+  - [2.1 PaddlePaddle Installation](#2.1)
+  - [2.2 PaddleClas Installation](#2.2)
+  - [2.3 Prediction](#2.3)
+- [3. Training, Evaluation and Inference](#3)
+  - [3.1 Installation](#3.1)
+  - [3.2 Dataset](#3.2)
+    - [3.2.1 Dataset Introduction](#3.2.1)
+    - [3.2.2 Getting Dataset](#3.2.2)
+  - [3.3 Training](#3.3)
+  - [3.4 Evaluation](#3.4)
+  - [3.5 Inference](#3.5)
+- [4. Model Compression](#4)
+  - [4.1 SKL-UGI Knowledge Distillation](#4.1)
+    - [4.1.1 Teacher Model Training](#4.1.1)
+    - [4.1.2 Knowledge Distillation Training](#4.1.2)
+- [5. Hyperparameters Searching](#5)
+- [6. Inference Deployment](#6)
+  - [6.1 Getting Paddle Inference Model](#6.1)
+    - [6.1.1 Exporting Paddle Inference Model](#6.1.1)
+    - [6.1.2 Downloading Inference Model](#6.1.2)
+  - [6.2 Prediction with Python](#6.2)
+    - [6.2.1 Image Prediction](#6.2.1)
+    - [6.2.2 Images Prediction](#6.2.2)
+  - [6.3 Deployment with C++](#6.3)
+  - [6.4 Deployment as Service](#6.4)
+  - [6.5 Deployment on Mobile](#6.5)
+  - [6.6 Converting To ONNX and Deployment](#6.6)
+
+
+
+## 1. Introduction
+
+This case provides a way for users to quickly build a lightweight, high-precision and practical traffic sign classification model using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in automatic driving, road monitoring and similar scenarios.
+
+The following table lists the relevant metrics of the models. The first two rows show the results of using SwinTransformer_tiny and MobileNetV3_small_x0_35 as the backbone. The third to sixth rows show the results of replacing the backbone with PPLCNet, using the SSLD pretrained model, additionally using the EDA strategy, and finally additionally using the SKL-UGI knowledge distillation strategy.
+
+| Backbone | Top-1 Acc(%) | Latency(ms) | Size(M)| Training Strategy |
+|-------|-----------|----------|---------------|---------------|
+| SwinTransformer_tiny | 98.11 | 89.45 | 111 | using ImageNet pretrained model |
+| MobileNetV3_small_x0_35 | 93.88 | 3.01 | 3.9 | using ImageNet pretrained model |
+| PPLCNet_x1_0 | 97.78 | 2.10 | 8.2 | using ImageNet pretrained model |
+| PPLCNet_x1_0 | 97.84 | 2.10 | 8.2 | using SSLD pretrained model |
+| PPLCNet_x1_0 | 98.14 | 2.10 | 8.2 | using SSLD pretrained model + EDA strategy |
+| PPLCNet_x1_0 | 98.35 | 2.10 | 8.2 | using SSLD pretrained model + EDA strategy + SKL-UGI knowledge distillation strategy|
+
+As the table shows, SwinTransformer_tiny reaches high accuracy, but its inference is slow.
Replacing the backbone with the lightweight MobileNetV3_small_x0_35 greatly improves the speed, but the accuracy drops sharply. Replacing the backbone with the faster PPLCNet_x1_0 makes the accuracy about 3.9 percentage points higher than MobileNetV3_small_x0_35, with more than 43% faster inference as well. On this basis, using the SSLD pretrained model improves the accuracy by about 0.06 percentage points without affecting the inference speed, additionally using the EDA strategy increases it by another 0.3 percentage points, and finally the SKL-UGI knowledge distillation improves it by a further 0.21 percentage points. At this point, the accuracy exceeds that of SwinTransformer_tiny, while inference is more than 41 times faster. The training method and deployment instructions of PULC will be introduced in detail below.
+
+**Note**:
+
+* The Latency is tested on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz. MKLDNN is enabled and the number of threads is 10.
+* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and [PP-LCNet Paper](https://arxiv.org/abs/2109.15099).
+
+
+
+## 2. Quick Start
+
+
+
+### 2.1 PaddlePaddle Installation
+
+- Run the following command to install if CUDA9 or CUDA10 is available.
+
+```bash
+python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
+```
+
+- Run the following command to install if GPU device is unavailable.
+
+```bash
+python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+```
+
+Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more installation information, for example, for other versions.
+
+
+
+### 2.2 PaddleClas wheel Installation
+
+Install PaddleClas with the following command:
+
+```bash
+pip3 install paddleclas
+```
+
+
+
+### 2.3 Prediction
+
+First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip the test demo images.
+
+* Prediction with CLI
+
+```bash
+paddleclas --model_name=traffic_sign --infer_imgs=pulc_demo_imgs/traffic_sign/100999_83928.jpg
+```
+
+Results:
+
+```
+>>> result
+class_ids: [182, 179, 162, 128, 24], scores: [0.98623, 0.01255, 0.00022, 0.00021, 0.00012], label_names: ['pl110', 'pl100', 'pl120', 'p26', 'pm10'], filename: pulc_demo_imgs/traffic_sign/100999_83928.jpg
+Predict complete!
+```
+
+**Note**: If you want to test other images, you only need to change the `--infer_imgs` argument; a directory containing images is also supported.
+
+* Prediction in Python
+
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="traffic_sign")
+result = model.predict(input_data="pulc_demo_imgs/traffic_sign/100999_83928.jpg")
+print(next(result))
+```
+
+**Note**: The `result` returned by `model.predict()` is a generator, so you need to call it with `next()` or iterate over it with a `for` loop. Each call runs prediction on a batch of `batch_size` images and returns the results. The default `batch_size` is 1, and you can also specify `batch_size` when instantiating, such as `model = paddleclas.PaddleClas(model_name="traffic_sign", batch_size=2)`. The result of the demo above:
+
+```
+>>> result
+[{'class_ids': [182, 179, 162, 128, 24], 'scores': [0.98623, 0.01255, 0.00022, 0.00021, 0.00012], 'label_names': ['pl110', 'pl100', 'pl120', 'p26', 'pm10'], 'filename': 'pulc_demo_imgs/traffic_sign/100999_83928.jpg'}]
+```
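+
+To make the generator behavior concrete, here is a minimal hedged sketch of looping over a whole directory in batches; the directory path is illustrative, and each iteration is assumed to yield the list of per-image result dicts shown above:
+
+```python
+import paddleclas
+
+model = paddleclas.PaddleClas(model_name="traffic_sign", batch_size=2)
+result = model.predict(input_data="pulc_demo_imgs/traffic_sign/")
+for batch in result:          # each iteration yields one batch of predictions
+    for pred in batch:        # each prediction is a dict like the one above
+        print(pred["filename"], pred["label_names"])
+```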
+
+
+
+## 3. Training, Evaluation and Inference
+
+
+
+### 3.1 Installation
+
+Please refer to [Installation](../installation/install_paddleclas_en.md) for the description of installation.
+
+
+
+### 3.2 Dataset
+
+
+
+#### 3.2.1 Dataset Introduction
+
+All data used in this case are open source. The dataset is based on the [Tsinghua-Tencent 100K dataset (CC-BY-NC license), TT100K](https://cg.cs.tsinghua.edu.cn/traffic-sign/), randomly expanded and cropped according to the bounding boxes. In addition, `ImageNet_val`, the validation data of [ImageNet-1k](https://www.image-net.org/), is used as supplementary unlabeled data for the knowledge distillation in [Section 4.1](#4.1).
+
+
+
+#### 3.2.2 Getting Dataset
+
+The processing of `TT100K` consists of random expansion followed by cropping; the core logic is shown below.
+
+```python
+import random
+
+
+def get_random_crop_box(xmin, ymin, xmax, ymax, img_height, img_width, ratio=1.0):
+    # Size of the original bounding box.
+    h = ymax - ymin
+    w = xmax - xmin
+
+    # Expand each side by a random amount, bounded by the box size and by
+    # the distance from that side to the image border.
+    xmin_diff = random.random() * ratio * min(w, xmin / ratio)
+    ymin_diff = random.random() * ratio * min(h, ymin / ratio)
+    xmax_diff = random.random() * ratio * min(w, (img_width - xmax - 1) / ratio)
+    ymax_diff = random.random() * ratio * min(h, (img_height - ymax - 1) / ratio)
+
+    new_xmin = round(xmin - xmin_diff)
+    new_ymin = round(ymin - ymin_diff)
+    new_xmax = round(xmax + xmax_diff)
+    new_ymax = round(ymax + ymax_diff)
+
+    return new_xmin, new_ymin, new_xmax, new_ymax
+```
+
+Some images from the processed dataset are shown below:
+
+ +
+
+You can also download the processed data directly; the processing script `deal.py` is included as well.
+
+```
+cd path_to_PaddleClas
+```
+
+Enter the `dataset/` directory, then download and unzip the dataset.
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/PULC/traffic_sign.tar
+tar -xf traffic_sign.tar
+cd ../
+```
+
+The data under the `traffic_sign` directory:
+
+```
+traffic_sign
+├── train
+│   ├── 0_62627.jpg
+│   ├── 100000_89031.jpg
+│   ├── 100001_89031.jpg
+...
+├── test
+│   ├── 100423_2315.jpg
+│   ├── 100424_2315.jpg
+│   ├── 100425_2315.jpg
+...
+├── other
+│   ├── 100603_3422.jpg
+│   ├── 100604_3422.jpg
+...
+├── label_list_train.txt
+├── label_list_test.txt
+├── label_list_other.txt
+├── label_list_train_for_distillation.txt
+├── label_list_train.txt.debug
+├── label_list_test.txt.debug
+├── label_name_id.txt
+└── deal.py
+```
+
+Where `train/` and `test/` are the training set and validation set respectively. The `label_list_train.txt` and `label_list_test.txt` are the label files of the training data and validation data respectively. The files `label_list_train.txt.debug` and `label_list_test.txt.debug` are small subsets of `label_list_train.txt` and `label_list_test.txt` respectively. `other/` is supplementary data used for SKL-UGI knowledge distillation, and its label file is `label_list_train_for_distillation.txt`.
+
+**Note**:
+
+* For the content format of `label_list_train.txt` and `label_list_test.txt`, please refer to [Description about Classification Dataset in PaddleClas](../data_preparation/classification_dataset_en.md).
+* About `label_list_train_for_distillation.txt`, please refer to [Knowledge Distillation Label](../advanced_tutorials/distillation/distillation_en.md).
+
+
+
+### 3.3 Training
+
+The training configuration is detailed in `ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml`. Start training with the following command:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml
+```
+
+The best metric on the validation data is between `98.0` and `98.2`; it fluctuates because the dataset is small.
+
+
+
+### 3.4 Evaluation
+
+After training, you can use the following command to evaluate the model.
+
+```bash
+python3 tools/eval.py \
+    -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model="output/PPLCNet_x1_0/best_model"
+```
+
+In the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file. You can specify another path if needed.
+
+
+
+### 3.5 Inference
+
+After training, you can use the trained model for inference. The command is as follows:
+
+```bash
+python3 tools/infer.py \
+    -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/PPLCNet_x1_0/best_model
+```
+
+The results:
+
+```
+99603_17806.jpg: class id(s): [216, 145, 49, 207, 169], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['pm20', 'pm30', 'pm40', 'pl25', 'pm15']
+```
+
+**Note**:
+
+* In the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specifies the path of the best model weight file. You can specify another path if needed.
+* The default test image is `deploy/images/PULC/traffic_sign/99603_17806.jpg`. To test another image, you only need to specify the argument `-o Infer.infer_imgs=path_to_test_image`.
+
+
+
+
+## 4. Model Compression
+
+
+
+### 4.1 SKL-UGI Knowledge Distillation
+
+SKL-UGI is a simple yet effective knowledge distillation algorithm proposed by PaddleClas.
+
+
+
+
+
+
+#### 4.1.1 Teacher Model Training
+
+Train the teacher model with the hyperparameters specified in `ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml`. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd
+```
+
+The best metric on the validation data is about `98.59%`. The best teacher model weights are saved in `output/ResNet101_vd/best_model.pdparams`.
+
+
+
+#### 4.1.2 Knowledge Distillation Training
+
+The training strategy is specified in the config file `ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml`, where the teacher model is `ResNet101_vd`, the student model is `PPLCNet_x1_0`, and the additional unlabeled training data is the validation data of ImageNet-1k. The command is as follows:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+The best metric is about `98.35%`. The best student model weights are saved in `output/DistillationModel/best_model_student.pdparams`.
+
+
+
+## 5. Hyperparameters Searching
+
+The hyperparameters used in [Section 3.2](#3.2) and [Section 4.1](#4.1) were obtained with the `Hyperparameters Searching` strategy in PaddleClas. If you want to get better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to obtain better hyperparameters.
+
+**Note**: This section is optional. Because the search process takes a long time, you can run it selectively according to your own needs. If you do not replace the dataset, you can skip this section.
+
+
+
+## 6. Inference Deployment
+
+
+
+### 6.1 Getting Paddle Inference Model
+
+Paddle Inference is the native inference library of PaddlePaddle, which provides high-performance inference for server deployment. Compared with predicting directly from the trained model, Paddle Inference can use tools to accelerate prediction and achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information.
+
+Paddle Inference needs a Paddle Inference Model for prediction. There are two ways to get one. If you want to use the model provided by PaddleClas, you can download it directly; see [Downloading Inference Model](#6.1.2).
+
+
+### 6.1.1 Exporting Paddle Inference Model
+
+The command for exporting the Paddle Inference Model is as follows:
+
+```bash
+python3 tools/export_model.py \
+    -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/DistillationModel/best_model_student \
+    -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_traffic_sign_infer
+```
+
+After running the above command, the inference model files are saved in `deploy/models/PPLCNet_x1_0_traffic_sign_infer`, as shown below:
+
+```
+├── PPLCNet_x1_0_traffic_sign_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+**Note**: The best model here comes from knowledge distillation training.
If knowledge distillation training is not used, the best model is saved in `output/PPLCNet_x1_0/best_model.pdparams`.
+
+
+
+### 6.1.2 Downloading Inference Model
+
+You can also download the inference model directly.
+
+```
+cd deploy/models
+# download the inference model and decompress it
+wget https://paddleclas.bj.bcebos.com/models/PULC/traffic_sign_infer.tar && tar -xf traffic_sign_infer.tar
+```
+
+After decompression, the `models` directory should look like this:
+
+```
+├── traffic_sign_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+
+
+### 6.2 Prediction with Python
+
+
+
+#### 6.2.1 Image Prediction
+
+Return to the `deploy` directory:
+
+```
+cd ../
+```
+
+Run the following command to classify the traffic sign in the image `./images/PULC/traffic_sign/99603_17806.jpg`.
+
+```shell
+# Use the following command to predict with GPU.
+python3.7 python/predict_cls.py -c configs/PULC/traffic_sign/inference_traffic_sign.yaml
+# Use the following command to predict with CPU.
+python3.7 python/predict_cls.py -c configs/PULC/traffic_sign/inference_traffic_sign.yaml -o Global.use_gpu=False
+```
+
+The prediction results:
+
+```
+99603_17806.jpg: class id(s): [216, 145, 49, 207, 169], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['pm20', 'pm30', 'pm40', 'pl25', 'pm15']
+```
+
+
+
+#### 6.2.2 Images Prediction
+
+If you want to predict the images in a directory, please specify the argument `Global.infer_imgs` as the directory path with `-o Global.infer_imgs`. The command is as follows.
+
+```shell
+# Use the following command to predict with GPU. If you want to use CPU instead, add the argument -o Global.use_gpu=False
+python3.7 python/predict_cls.py -c configs/PULC/traffic_sign/inference_traffic_sign.yaml -o Global.infer_imgs="./images/PULC/traffic_sign/"
+```
+
+All prediction results will be printed, as shown below.
+
+```
+100999_83928.jpg: class id(s): [182, 179, 162, 128, 24], score(s): [0.99, 0.01, 0.00, 0.00, 0.00], label_name(s): ['pl110', 'pl100', 'pl120', 'p26', 'pm10']
+99603_17806.jpg: class id(s): [216, 145, 49, 24, 169], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['pm20', 'pm30', 'pm40', 'pm10', 'pm15']
+```
+
+For details about the `label_name`, please refer to `dataset/traffic_sign/report.pdf`.
+
+
+
+### 6.3 Deployment with C++
+
+PaddleClas provides an example of how to deploy with C++. Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md).
+
+
+
+### 6.4 Deployment as Service
+
+Paddle Serving is a flexible, high-performance carrier for machine learning models. It supports multiple protocols, such as RESTful, gRPC and bRPC, and provides deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer to [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information.
+
+PaddleClas provides an example of how to deploy as a service with Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md).
+
+
+
+### 6.5 Deployment on Mobile
+
+Paddle-Lite is an open-source deep learning framework designed to make it easy to perform inference on mobile, embedded and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information.
+
+PaddleClas provides an example of how to deploy on mobile with Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md).
+
+
+
+### 6.6 Converting To ONNX and Deployment
+
+Paddle2ONNX supports converting a Paddle Inference model to the ONNX format, after which you can deploy the ONNX model with different inference engines, such as TensorRT, OpenVINO, MNN/TNN and NCNN. For details about Paddle2ONNX, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX).
+
+PaddleClas provides an example of how to convert a Paddle Inference model to an ONNX model with the paddle2onnx toolkit and predict with the ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details.
diff --git a/docs/en/PULC/PULC_train_en.md b/docs/en/PULC/PULC_train_en.md
new file mode 100644
index 0000000000000000000000000000000000000000..9f94265e9ffb38f40633c671b0f6a60846f8cd08
--- /dev/null
+++ b/docs/en/PULC/PULC_train_en.md
@@ -0,0 +1,246 @@
+## Practical Ultra Lightweight Classification scheme PULC
+------
+
+
+## Catalogue
+
+- [1. Introduction of PULC solution](#1)
+- [2. Data preparation](#2)
+  - [2.1 Dataset format description](#2.1)
+  - [2.2 Annotation file generation method](#2.2)
+- [3. Training with standard classification configuration](#3)
+  - [3.1 PP-LCNet as backbone](#3.1)
+  - [3.2 SSLD pretrained model](#3.2)
+  - [3.3 EDA strategy](#3.3)
+  - [3.4 SKL-UGI knowledge distillation](#3.4)
+  - [3.5 Summary](#3.5)
+- [4. Hyperparameters Searching](#4)
+  - [4.1 Search based on default configuration](#4.1)
+  - [4.2 Custom search configuration](#4.2)
+
+
+
+### 1. Introduction of PULC solution
+
+Image classification is one of the basic algorithms of computer vision, the most common algorithm in enterprise applications, and an important component of many CV applications. In recent years, backbone network models have developed rapidly, and the accuracy record on ImageNet has been refreshed again and again. However, the performance of these models in practical scenarios is sometimes unsatisfactory. On the one hand, high-precision models tend to be large and slow at inference, which often makes it difficult to meet actual deployment requirements; on the other hand, after a suitable model is selected, experienced engineers are still needed to tune the hyperparameters, which is time-consuming and labor-intensive. In order to solve these problems and make the training and tuning of classification models easier, PaddleClas has summarized and launched the Practical Ultra Lightweight Classification (PULC) solution. PULC integrates various state-of-the-art algorithms covering the backbone network, data augmentation, distillation and more, and can automatically produce a lightweight and high-precision image classification model.
+
+The PULC solution has been verified to be effective in many scenarios, such as human-related, car-related and OCR-related scenarios. With an ultra-lightweight model, it can reach accuracy close to SwinTransformer while being 40+ times faster at inference.
+
+
+ +
+
+
+The solution mainly consists of 4 parts: the PP-LCNet lightweight backbone network, the SSLD pretrained model, Ensemble Data Augmentation (EDA) and the SKL-UGI knowledge distillation algorithm. In addition, we adopt hyperparameters searching to optimize the training hyperparameters efficiently. Below, we take the person exists or not scenario as an example to illustrate the solution.
+
+**Note**: For some specific scenarios, we provide basic training documents for reference, such as the [person exists or not classification model](PULC_person_exists_en.md); you can find these documents [here](./PULC_model_list_en.md). If the methods in these documents do not meet your needs, or if you need a custom training task, you can refer to this document.
+
+
+
+### 2. Data preparation
+
+
+
+#### 2.1 Dataset format description
+
+PaddleClas uses `txt` files to specify the training set and the validation set. Taking the person exists or not scenario as an example, you need to provide `train_list.txt` and `val_list.txt` as the data labels of the training set and validation set. The format is as follows:
+
+```
+# Each line uses a space to separate the image path and the label
+train/1.jpg 0
+train/10.jpg 1
+...
+```
+
+If you want more information about common classification datasets, you can refer to the document [PaddleClas Classification Dataset Format Description](../data_preparation/classification_dataset_en.md).
+
+
+
+#### 2.2 Annotation file generation method
+
+If you already have the data of your actual scenario, you can label it according to the format in the previous section. Here, we also provide a script that quickly generates annotation files: you only need to put the data of each category into its own folder and run the script.
+
+First, assume that the data is stored under `./train`, that `train/` contains one folder per category, that category numbers start from 0, and that each category folder contains the image files.
+
+```shell
+train
+├── 0
+│   ├── 0.jpg
+│   ├── 1.jpg
+│   └── ...
+└── 1
+    ├── 0.jpg
+    ├── 1.jpg
+    └── ...
+└── ...
+```
+
+```shell
+tree -r -i -f train | grep -E "jpg|JPG|jpeg|JPEG|png|PNG" | awk -F "/" '{print $0" "$2}' > train_list.txt
+```
+
+If more image name suffixes are involved, add them to the pattern after `grep -E`; the `2` in `$2` is the depth of the category-number folder. A Python alternative is sketched below.
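+
+If you prefer Python over the shell pipeline, the following hedged sketch (assuming the same `train/<class_id>/<image>` layout as above) produces an equivalent `train_list.txt`:
+
+```python
+import os
+
+exts = (".jpg", ".jpeg", ".png")  # extend with more suffixes if needed
+with open("train_list.txt", "w") as f:
+    for cls in sorted(os.listdir("train")):       # class-number folders: 0, 1, ...
+        cls_dir = os.path.join("train", cls)
+        if not os.path.isdir(cls_dir):
+            continue
+        for name in sorted(os.listdir(cls_dir)):
+            if name.lower().endswith(exts):
+                # Same "path label" format as the shell one-liner produces.
+                f.write(f"{cls_dir}/{name} {cls}\n")
+```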
+
+**Note:** The above is an introduction to dataset acquisition and generation. Here you can directly download the person exists or not scenario data to start the experience quickly.
+
+Go to the PaddleClas directory.
+
+```
+cd path_to_PaddleClas
+```
+
+Go to the `dataset/` directory, then download and unzip the data.
+
+```shell
+cd dataset
+wget https://paddleclas.bj.bcebos.com/data/PULC/person_exists.tar
+tar -xf person_exists.tar
+cd ../
+```
+
+
+
+### 3. Training with standard classification configuration
+
+
+
+#### 3.1 PP-LCNet as backbone
+
+PULC adopts the lightweight backbone network PP-LCNet, which is 50% faster than other networks with the same accuracy. You can view the detailed introduction of the backbone network in [PP-LCNet Introduction](../models/PP-LCNet_en.md).
+
+The command to train with PP-LCNet is:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml
+```
+
+For performance comparison, we also provide configuration files for the large model SwinTransformer_tiny and the lightweight model MobileNetV3_small_x0_35, which you can train with the following commands:
+
+SwinTransformer_tiny:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml
+```
+
+MobileNetV3_small_x0_35:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml
+```
+
+The accuracy of the trained models is compared in the following table.
+
+| Model | Tpr(%) | Latency(ms) | Storage Size(M) | Strategy |
+|-------|-----------|----------|---------------|---------------|
+| SwinTransformer_tiny | 95.69 | 95.30 | 107 | Use ImageNet pretrained model|
+| MobileNetV3_small_x0_35 | 68.25 | 2.85 | 1.6 | Use ImageNet pretrained model |
+| PPLCNet_x1_0 | 89.57 | 2.12 | 6.5 | Use ImageNet pretrained model |
+
+As the table shows, PP-LCNet is much faster than SwinTransformer, but its accuracy is also lower. Below we improve the accuracy of the PP-LCNet model through a series of optimizations.
+
+
+
+#### 3.2 SSLD pretrained model
+
+SSLD is a semi-supervised distillation algorithm developed by Baidu; on the ImageNet dataset, it improves model accuracy by 3-7 percentage points. You can find a detailed introduction in [SSLD introduction](../advanced_tutorials/distillation/distillation_en.md). We found that using SSLD pretrained weights can effectively improve the accuracy of the downstream classification model. In addition, using a smaller resolution in training can effectively improve model accuracy, and we also optimized the learning rate. Based on these three improvements, the accuracy of our trained model reached 92.1%, an increase of 2.6 percentage points.
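+
+As a hedged illustration of how this is wired up, switching to the SSLD weights is a small config change; the keys below are assumed to follow the `Arch` section of the PULC config files, so check the actual YAML of your scenario:
+
+```yaml
+# Assumed Arch-section layout; verify against the shipped config.
+Arch:
+  name: PPLCNet_x1_0
+  pretrained: True
+  use_ssld: True   # load SSLD-distilled ImageNet weights instead of the standard ones
+```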
+ + + +#### 3.5 Summary + +After the optimization of the above methods, the final accuracy of PP-LCNet reaches 95.6%, reaching the accuracy level of the large model. We summarize the experimental results in the following table: + +| Model | Tpr(%) | Latency(ms) | Storage Size(M) | Strategy | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 95.69 | 95.30 | 107 | Use ImageNet pretrained model | +| MobileNetV3_small_x0_35 | 68.25 | 2.85 | 1.6 | Use ImageNet pretrained model | +| PPLCNet_x1_0 | 89.57 | 2.12 | 6.5 | Use ImageNet pretrained model | +| PPLCNet_x1_0 | 92.10 | 2.12 | 6.5 | Use SSLD pretrained model | +| PPLCNet_x1_0 | 93.43 | 2.12 | 6.5 | Use SSLD pretrained model + EDA Strategy| +| PPLCNet_x1_0 | 95.60 | 2.12 | 6.5 | Use SSLD pretrained model + EDA Strategy + SKL-UGI knowledge distillation | + +We also used the same optimization strategy in the other 8 scenarios and got the following results: + +| scenarios | large model | large model metrics(%) | small model | small model metrics(%) | +|----------|----------|----------|----------|----------| +| Pedestrian Attribute Classification | Res2Net200_vd | 81.25 | PPLCNet_x1_0 | 78.59 | +| Classification of Wheather Wearing Safety Helmet | Res2Net200_vd| 98.92 | PPLCNet_x1_0 |99.38 | +| Traffic Sign Classification | SwinTransformer_tiny | 98.11 | PPLCNet_x1_0 | 98.35 | +| Vehicle Attribute Classification | Res2Net200_vd_26w_4s | 91.36 | PPLCNet_x1_0 | 90.81 | +| Car Exists Classification | SwinTransformer_tiny | 97.71 | PPLCNet_x1_0 | 95.92 | +| Text Image Orientation Classification | SwinTransformer_tiny |99.12 | PPLCNet_x1_0 | 99.06 | +| Text-line Orientation Classification | SwinTransformer_tiny | 93.61 | PPLCNet_x1_0 | 96.01 | +| Language Classification | SwinTransformer_tiny | 98.12 | PPLCNet_x1_0 | 99.26 | + + +It can be seen from the results that the PULC scheme can improve the model accuracy in multiple application scenarios. Using the PULC scheme can greatly reduce the workload of model optimization and quickly obtain models with higher accuracy. + + + + +### 4. Hyperparameters Searching + +In the above training process, we adjusted parameters such as learning rate, data augmentation probability, and stage learning rate mult list. The optimal values of these parameters may not be the same in different scenarios. We provide a quick hyperparameters searching script to automate the process of hyperparameter tuning. This script traverses the parameters in the search value list to replace the parameters in the default configuration, then trains in sequence, and finally selects the parameters corresponding to the model with the highest accuracy as the search result. + + + +#### 4.1 Search based on default configuration + +The configuration file [search.yaml](../../../ppcls/configs/PULC/person_exists/search.yaml) defines the configuration of hyperparameters searching in person exists or not scenarios. Use the following commands to complete hyperparameters searching. + +```bash +python3 tools/search_strategy.py -c ppcls/configs/PULC/person_exists/search.yaml +``` + +**Note**:Regarding the search part, we are also constantly improving, so stay tuned. + + + +#### 4.2 Custom search configuration + + +You can also modify the configuration of hyperparameters searching based on training results or your parameter tuning experience. 
+ +Modify the `search_values` field in `lrs` to modify the list of learning rate search values; + +Modify the `search_values` field in `resolutions` to modify the search value list of resolutions; + +Modify the `search_values` field in `ra_probs` to modify the search value list of RandAugment activation probability; + +Modify the `search_values` field in `re_probs` to modify the search value list of RnadomErasing on probability; + +Modify the `search_values` field in `lr_mult_list` to modify the lr_mult search value list; + +Modify the `search_values` field in `teacher` to modify the search list of the teacher model. + +After the search is completed, the final results will be generated in `output/search_person_exists`, where, except for `search_res`, the directories in `output/search_person_exists` are the weights and training log files of the results of the corresponding hyperparameters of each search training, ` search_res` corresponds to the result of knowledge distillation, that is, the final model. The weights of the model are stored in `output/output_dir/search_person_exists/DistillationModel/best_model_student.pdparams`. diff --git a/docs/en/PULC/PULC_vehicle_attribute_en.md b/docs/en/PULC/PULC_vehicle_attribute_en.md new file mode 100644 index 0000000000000000000000000000000000000000..47d7c963e9de6e4bde9fd3338830611e59b60695 --- /dev/null +++ b/docs/en/PULC/PULC_vehicle_attribute_en.md @@ -0,0 +1,481 @@ +# PULC Recognition Model of Vehicle Attribute + +------ + +## Catalogue + +- [1. Introduction](#1) +- [2. Quick Start](#2) + - [2.1 PaddlePaddle Installation](#2.1) + - [2.2 PaddleClas Installation](#2.2) + - [2.3 Prediction](#2.3) +- [3. Training, Evaluation and Inference](#3) + - [3.1 Installation](#3.1) + - [3.2 Dataset](#3.2) + - [3.2.1 Dataset Introduction](#3.2.1) + - [3.2.2 Getting Dataset](#3.2.2) + - [3.3 Training](#3.3) + - [3.4 Evaluation](#3.4) + - [3.5 Inference](#3.5) +- [4. Model Compression](#4) + - [4.1 SKL-UGI Knowledge Distillation](#4.1) + - [4.1.1 Teacher Model Training](#4.1.1) + - [4.1.2 Knowledge Distillation Training](#4.1.2) +- [5. SHAS](#5) +- [6. Inference Deployment](#6) + - [6.1 Getting Paddle Inference Model](#6.1) + - [6.1.1 Exporting Paddle Inference Model](#6.1.1) + - [6.1.2 Downloading Inference Model](#6.1.2) + - [6.2 Prediction with Python](#6.2) + - [6.2.1 Image Prediction](#6.2.1) + - [6.2.2 Images Prediction](#6.2.2) + - [6.3 Deployment with C++](#6.3) + - [6.4 Deployment as Service](#6.4) + - [6.5 Deployment on Mobile](#6.5) + - [6.6 Converting To ONNX and Deployment](#6.6) + + + +## 1. Introduction + +This case provides a way for users to quickly build a lightweight, high-precision and practical classification model of vehicle attribute using PaddleClas PULC (Practical Ultra Lightweight image Classification). The model can be widely used in Vehicle identification, road monitoring and other scenarios. + +The following table lists the relevant indicators of the model. The first three lines means that using Res2Net200_vd_26w_4s, ResNet50 and MobileNetV3_small_x0_35 as the backbone to training. The fourth to seventh lines means that the backbone is replaced by PPLCNet, additional use of EDA strategy and additional use of EDA strategy and SKL-UGI knowledge distillation strategy. 
+ + +| Backbone | mA(%) | Latency(ms) | Size(M) | Training Strategy | +|-------|-----------|----------|---------------|---------------| +| Res2Net200_vd_26w_4s | 91.36 | 79.46 | 293 | using ImageNet pretrained | +| ResNet50 | 89.98 | 12.83 | 92 | using ImageNet pretrained | +| MobileNetV3_small_x0_35 | 87.41 | 2.91 | 2.8 | using ImageNet pretrained | +| PPLCNet_x1_0 | 89.57 | 2.36 | 7.2 | using ImageNet pretrained | +| PPLCNet_x1_0 | 90.07 | 2.36 | 7.2 | using SSLD pretrained | +| PPLCNet_x1_0 | 90.59 | 2.36 | 7.2 | using SSLD pretrained + EDA strategy| +| PPLCNet_x1_0 | 90.81 | 2.36 | 7.2 | using SSLD pretrained + EDA strategy + SKL-UGI knowledge distillation strategy| + + +It can be seen from the table that the ma metric is higher when the backbone is Res2Net200_vd_26w_4s, but the inference speed is slower. After replacing the backbone with the lightweight model MobileNetV3_small_x0_35, the speed can be greatly improved, but the ma metric drops significantly. When the backbone is replaced by PPLCNet_x1_0, the ma metric is increased by 2 percentage points, and the speed is also increased by about 23%. On this basis, after using the SSLD pre-training model, the ma metric can be improved by about 0.5 percentage points without changing the inference speed. Further, when the EDA strategy is integrated, the ma metric can be improved by another 0.52 percentage points. Finally, using After SKL-UGI knowledge distillation, the ma metric can continue to improve by 0.23 percentage points. At this time, the ma metric of PPLCNet_x1_0 is only 0.55 percentage points away from Res2Net200_vd_26w_4s, but it is 32 times faster. The training method and deployment instructions of PULC will be introduced in detail below. + + +**Note**: + +* The Latency is tested on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz. The MKLDNN is enabled and the number of threads is 10. +* About PP-LCNet, please refer to [PP-LCNet Introduction](../models/PP-LCNet_en.md) and [PP-LCNet Paper](https://arxiv.org/abs/2109.15099). + + + +## 2. Quick Start + + + +### 2.1 PaddlePaddle Installation + +- Run the following command to install if CUDA9 or CUDA10 is available. + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- Run the following command to install if GPU device is unavailable. + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +Please refer to [PaddlePaddle Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/en/install/pip/linux-pip_en.html) for more information about installation, for examples other versions. + + + +### 2.2 PaddleClas wheel Installation + +The command of PaddleClas installation as bellow: + +```bash +pip3 install paddleclas +``` + + + +### 2.3 Prediction + +First, please click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download and unzip to get the test demo images. + + +* Prediction with CLI + +```bash +paddleclas --model_name=vehicle_attribute --infer_imgs=pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg +``` + +Results: +``` +>>> result +attributes: Color: (yellow, prob: 0.9893476963043213), Type: (hatchback, prob: 0.9734097719192505), output: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], filename: pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg +Predict complete! +``` + +**Note**: If you want to test other images, only need to specify the `--infer_imgs` argument, and the directory containing images is also supported. 
+ +* Prediction in Python + +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="vehicle_attribute") +result = model.predict(input_data="pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg") +print(next(result)) +``` + +**Note**: The `result` returned by `model.predict()` is a generator, so you need to use the `next()` function to call it or `for` loop to loop it. And it will predict with `batch_size` size batch and return the prediction results when called. The default `batch_size` is 1, and you also specify the `batch_size` when instantiating, such as `model = paddleclas.PaddleClas(model_name="vehicle_attribute", batch_size=2)`. The result of demo above: + +``` +>>> result +[{'attributes': 'Color: (yellow, prob: 0.9893476963043213), Type: (hatchback, prob: 0.9734097719192505)', 'output': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 'filename': 'pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg'}] +``` + + + +## 3. Training, Evaluation and Inference + + + +### 3.1 Installation + +Please refer to [Installation](../installation/install_paddleclas_en.md) to get the description about installation. + + + +### 3.2 Dataset + + + +#### 3.2.1 Dataset Introduction + +The data used in this case is the [pa100k dataset](https://www.v7labs.com/open-datasets/pa-100k). + + + +#### 3.2.2 Getting Dataset + + +Part of the data visualization is shown below. + +
+ +
+ +First, apply for and download data from [VeRi dataset official website](https://www.v7labs.com/open-datasets/veri-dataset), put it in the `dataset` directory of PaddleClas, the dataset directory name is `VeRi `, use the following command to enter the folder. + + +```shell +cd PaddleClas/dataset/VeRi/ +``` + +Then use the following code to convert the label (you can execute the following command in the python terminal, or you can write it to a file and run the file using `python3 convert.py`). + +```python +import os +from xml.dom.minidom import parse + +vehicleids = [] + +def convert_annotation(input_fp, output_fp): + in_file = open(input_fp) + list_file = open(output_fp, 'w') + tree = parse(in_file) + + root = tree.documentElement + + for item in root.getElementsByTagName("Item"): + label = ['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0'] + if item.hasAttribute("imageName"): + name = item.getAttribute("imageName") + if item.hasAttribute("vehicleID"): + vehicleid = item.getAttribute("vehicleID") + if vehicleid not in vehicleids : + vehicleids.append(vehicleid) + vid = vehicleids.index(vehicleid) + if item.hasAttribute("colorID"): + colorid = int (item.getAttribute("colorID")) + label[colorid-1] = '1' + if item.hasAttribute("typeID"): + typeid = int (item.getAttribute("typeID")) + label[typeid+9] = '1' + label = ','.join(label) + list_file.write(os.path.join('image_train', name) + "\t" + label + "\n") + + list_file.close() + +convert_annotation('train_label.xml', 'train_list.txt') #imagename vehiclenum colorid typeid +convert_annotation('test_label.xml', 'test_list.txt') +``` + + +After executing the above command, the `VeRi` directory has the following data: + +``` +VeRi +├── image_train +│ ├── 0001_c001_00016450_0.jpg +│ ├── 0001_c001_00016460_0.jpg +│ ├── 0001_c001_00016470_0.jpg +... +├── image_test +│ ├── 0002_c002_00030600_0.jpg +│ ├── 0002_c002_00030605_1.jpg +│ ├── 0002_c002_00030615_1.jpg +... +... +├── train_list.txt +├── test_list.txt +├── train_label.xml +├── test_label.xml +``` + +where `train/` and `test/` are the training set and validation set, respectively. `train_list.txt` and `test_list.txt` are the converted label files for training and validation sets, respectively. + + + + +### 3.3 Training + +The details of training config in `./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml`. The command about training as follows: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml +``` + +The best metric for the validation set is around `90.59%` (the dataset is small and generally fluctuates around 0.3%). + + + + +### 3.4 Evaluation + +After training, you can use the following commands to evaluate the model. + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +Among the above command, the argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specify the path of the best model weight file. You can specify other path if needed. + + + +### 3.5 Inference + +After training, you can use the model that trained to infer. 
Command is as follow: + +```python +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model +``` + +The results: + +``` +[{'attr': 'Color: (yellow, prob: 0.9893478155136108), Type: (hatchback, prob: 0.9734100103378296)', 'pred': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 'file_name': './deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg'}] +``` + +**Note**: + +* Among the above command, argument `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` specify the path of the best model weight file. You can specify other path if needed. +* The default test image is `./deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg`. And you can test other image, only need to specify the argument `-o Infer.infer_imgs=path_to_test_image`. + + + +## 4. Model Compression + + + +### 4.1 SKL-UGI Knowledge Distillation + +SKL-UGI is a simple but effective knowledge distillation algrithem proposed by PaddleClas. + + + + + + +#### 4.1.1 Teacher Model Training + +Training the teacher model with hyperparameters specified in `ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml`. The command is as follow: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd +``` + +The best metric for the validation set is around `91.60%`. The best teacher model weight would be saved in file `output/ResNet101_vd/best_model.pdparams`. + + + +#### 4.1.2 Knowledge Distillation Training + +The training strategy, specified in training config file `ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml`, the teacher model is `ResNet101_vd`, the student model is `PPLCNet_x1_0`. The command is as follow: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model +``` + +The best metric for the validation set is around `90.81%`. The best student model weight would be saved in file `output/DistillationModel/best_model_student.pdparams`. + + + +## 5. Hyperparameters Searching + +The hyperparameters used by [3.2 section](#3.2) and [4.1 section](#4.1) are according by `Hyperparameters Searching` in PaddleClas. If you want to get better results on your own dataset, you can refer to [Hyperparameters Searching](PULC_train_en.md#4) to get better hyperparameters. + +**Note**: This section is optional. Because the search process will take a long time, you can selectively run according to your specific. If not replace the dataset, you can ignore this section. + + + +## 6. Inference Deployment + + + +### 6.1 Getting Paddle Inference Model + +Paddle Inference is the original Inference Library of the PaddlePaddle, provides high-performance inference for server deployment. And compared with directly based on the pretrained model, Paddle Inference can use tools to accelerate prediction, so as to achieve better inference performance. Please refer to [Paddle Inference](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html) for more information. + +Paddle Inference need Paddle Inference Model to predict. Two process provided to get Paddle Inference Model. 
If want to use the provided by PaddleClas, you can download directly, click [Downloading Inference Model](#6.1.2). + + + +### 6.1.1 Exporting Paddle Inference Model + +The command about exporting Paddle Inference Model is as follow: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_vehicle_attribute_infer +``` + +After running above command, the inference model files would be saved in `PPLCNet_x1_0_vehicle_attribute_infer`, as shown below: + +``` +└── PPLCNet_x1_0_vehicle_attribute_infer + ├── inference.pdiparams + ├── inference.pdiparams.info + └── inference.pdmodel +``` + +**Note**: The best model is from knowledge distillation training. If knowledge distillation training is not used, the best model would be saved in `output/PPLCNet_x1_0/best_model.pdparams`. + + + +### 6.1.2 Downloading Inference Model + +You can also download directly. + +``` +cd deploy/models +# download the inference model and decompression +wget https://paddleclas.bj.bcebos.com/models/PULC/vehicle_attribute_infer.tar && tar -xf vehicle_attribute_infer.tar +``` + +After decompression, the directory `models` should be shown below. + +``` +├── vehicle_attribute_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 Prediction with Python + + + +#### 6.2.1 Image Prediction + +Return the directory `deploy`: + +``` +cd ../ +``` + +Run the following command to classify whether there are human in the image `../images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg`. + +```shell +# Use the following command to predict with GPU. +python3.7 python/predict_cls.py -c configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml -o Global.use_gpu=True +# Use the following command to predict with CPU. +python3.7 python/predict_cls.py -c configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml -o Global.use_gpu=False +``` + +The prediction results: + +``` +0002_c002_00030670_0.jpg: {'attributes': 'Color: (yellow, prob: 0.9893478155136108), Type: (hatchback, prob: 0.9734099507331848)', 'output': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]} +``` + + + + +#### 6.2.2 Images Prediction + +If you want to predict images in directory, please specify the argument `Global.infer_imgs` as directory path by `-o Global.infer_imgs`. The command is as follow. + +```shell +# Use the following command to predict with GPU. If want to replace with CPU, you can add argument -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml -o Global.infer_imgs="./images/PULC/vehicle_attribute/" +``` + +All prediction results will be printed, as shown below. + +``` +0002_c002_00030670_0.jpg: {'attributes': 'Color: (yellow, prob: 0.9893476963043213), Type: (hatchback, prob: 0.9734097719192505)', 'output': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]} +0014_c012_00040750_0.jpg: {'attributes': 'Color: (red, prob: 0.999872088432312), Type: (sedan, prob: 0.999976634979248)', 'output': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]} +``` + +Among the prediction results above, `someone` means that there is a human in the image, `nobody` means that there is no human in the image. + + + +### 6.3 Deployment with C++ + +PaddleClas provides an example about how to deploy with C++. 
Please refer to [Deployment with C++](../inference_deployment/cpp_deploy_en.md). + + + +### 6.4 Deployment as Service + +Paddle Serving is a flexible, high-performance carrier for machine learning models, and supports different protocol, such as RESTful, gRPC, bRPC and so on, which provides different deployment solutions for a variety of heterogeneous hardware and operating system environments. Please refer [Paddle Serving](https://github.com/PaddlePaddle/Serving) for more information. + +PaddleClas provides an example about how to deploy as service by Paddle Serving. Please refer to [Paddle Serving Deployment](../inference_deployment/paddle_serving_deploy_en.md). + + + +### 6.5 Deployment on Mobile + +Paddle-Lite is an open source deep learning framework that designed to make easy to perform inference on mobile, embeded, and IoT devices. Please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) for more information. + +PaddleClas provides an example of how to deploy on mobile by Paddle-Lite. Please refer to [Paddle-Lite deployment](../inference_deployment/paddle_lite_deploy_en.md). + + + +### 6.6 Converting To ONNX and Deployment + +Paddle2ONNX support convert Paddle Inference model to ONNX model. And you can deploy with ONNX model on different inference engine, such as TensorRT, OpenVINO, MNN/TNN, NCNN and so on. About Paddle2ONNX details, please refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX). + +PaddleClas provides an example of how to convert Paddle Inference model to ONNX model by paddle2onnx toolkit and predict by ONNX model. You can refer to [paddle2onnx](../../../deploy/paddle2onnx/readme_en.md) for deployment details. diff --git a/docs/en/algorithm_introduction/ImageNet_models_en.md b/docs/en/algorithm_introduction/ImageNet_models_en.md index b78061267b65d44a5c175368a2575bc2fc277f50..5a8ba0ac71ea4d4f4b0d89e7a182163d17a83f5c 100644 --- a/docs/en/algorithm_introduction/ImageNet_models_en.md +++ b/docs/en/algorithm_introduction/ImageNet_models_en.md @@ -541,9 +541,9 @@ The accuracy and speed indicators of MobileViT series models are shown in the fo | Model | Top-1 Acc | Top-5 Acc | time(ms)
bs=1 | time(ms)
bs=4 | time(ms)
bs=8 | FLOPs(M) | Params(M) | Pretrained Model Download Address | Inference Model Download Address | | ---------- | --------- | --------- | ---------------- | ---------------- | -------- | --------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| MobileViT_XXS | 0.6867 | 0.8878 | - | - | - | 1849.35 | 5.59 | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams) | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XXS_infer.tar) | +| MobileViT_XXS | 0.6867 | 0.8878 | - | - | - | 337.24 | 1.28 | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams) | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XXS_infer.tar) | | MobileViT_XS | 0.7454 | 0.9227 | - | - | - | 930.75 | 2.33 | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams) | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XS_infer.tar) | -| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 337.24 | 1.28 | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) | +| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 1849.35 | 5.59 | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) | diff --git a/docs/en/algorithm_introduction/reid.md b/docs/en/algorithm_introduction/reid.md new file mode 100644 index 0000000000000000000000000000000000000000..c4c5ac59e5003fad8e25fe7e2e9824bb3819532b --- /dev/null +++ b/docs/en/algorithm_introduction/reid.md @@ -0,0 +1,363 @@ +English | [简体中文](../../zh_CN/algorithm_introduction/reid.md) + +# ReID pedestrian re-identification + +## Contents + +- [1. Introduction to algorithms/application scenarios](#1-introduction-to-algorithmsapplication-scenarios) +- [2. Common datasets and metrics](#2-common-datasets-and-metrics) + - [2.1 Common datasets](#21-common-datasets) + - [2.2 Common metric](#22-common-metric) +- [3. ReID algorithm](#3-reid-algorithm) + - [3.1 ReID strong-baseline](#31-reid-strong-baseline) + - [3.1.1 Principle introduction](#311-principle-introduction) + - [3.1.2 Accuracy metrics](#312-accuracy-metrics) + - [3.1.3 Data Preparation](#313-data-preparation) + - [3.1.4 Model training](#314-model-training) +- [4. Model evaluation and inference deployment](#4-model-evaluation-and-inference-deployment) + - [4.1 Model Evaluation](#41-model-evaluation) + - [4.2 Model Inference](#42-model-inference) + - [4.2.1 Inference model preparation](#421-inference-model-preparation) + - [4.2.2 Inference based on Python prediction engine](#422-inference-based-on-python-prediction-engine) + - [4.2.3 Inference based on C++ prediction engine](#423-inference-based-on-c-prediction-engine) + - [4.3 Service deployment](#43-service-deployment) + - [4.4 Lite deployment](#44-lite-deployment) + - [4.5 Paddle2ONNX Model Conversion and Prediction](#45-paddle2onnx-model-conversion-and-prediction) +- [5. 
Summary](#5-summary) + - [5.1 Method summary and comparison](#51-method-summary-and-comparison) + - [5.2 Usage advice/FAQ](#52-usage-advicefaq) +- [6. References](#6-references) + +### 1. Introduction to algorithms/application scenarios + +Person re-identification (Re-ID), also known as person re-identification, has been widely studied as a cross-shot pedestrian retrieval problem. Given a pedestrian image captured by a certain camera, the goal is to determine whether the pedestrian has appeared in images captured by different cameras or in different time periods. The given pedestrian data can be a picture, a video frame, or even a text description. In recent years, the application demand of this technology in the field of public safety has been increasing, and the influence of pedestrian re-identification in intelligent monitoring technology is also increasing. + +At present, pedestrian re-identification is still a challenging task, especially the problems of different viewpoints, resolutions, illumination changes, occlusions, multi-modalities, as well as complex camera environment and background, labeling data noise, etc. There is great uncertainty. In addition, when the actual landing, the shooting camera may change, the large-scale retrieval database, the distribution shift of the data set, the unknown scene, the incremental update of the model, and the change of the clothing of the retrieval person, which also increases a lot of difficulties. + +Early work on person re-identification mainly focused on hand-designed feature extraction operators, including adding human pose features, or learning distance metric functions. With the development of deep learning technology, pedestrian recognition has also made great progress. In general, the whole process of pedestrian re-identification includes 5 steps: 1) data collection, 2) pedestrian location box annotation, 3) pedestrian category annotation, 4) model training, and 5) pedestrian retrieval (model testing). + + + +### 2. Common datasets and metrics + +#### 2.1 Common datasets + +| Dataset | #ID | #Image | #cam | +| :---------- | :----: | :----: | :---: | +| VIPeR | 632 | 1264 | 2 | +| iLIDS | 119 | 476 | 2 | +| GRID | 250 | 1275 | 8 | +| PRID2011 | 200 | 1134 | 2 | +| CUHK01 | 971 | 3884 | 2 | +| CUHK02 | 1816 | 7264 | 10 | +| CUHK03 | 1467 | 13164 | 2 | +| Market-1501 | 1501 | 32668 | 6 | +| DukeMTMC | 1404 | 36411 | 8 | +| Airport | 39902 | 39902 | 6 | +| MSMT17 | 126441 | 126441 | 15 | + +#### 2.2 Common metric + +1. CMC curve + + The formula is as follows: + $$ CMC(K)=\frac{1}{N} \sum_{i=1}^{N} \begin{cases} 1, & \text{if $label_i \in Top{K}(result_i)$} \\\\ 0, & \text{if $label_i \notin Top{K}(result_i)$} \end{cases} $$ + + Among them, $N$ is the number of query samples, and $result_i$ is the label set of the retrieval results of each query sample. According to the formula, the CMC curve can be understood as an array composed of Top1-Acc, Top2-Acc, ..., TopK-Acc , which is obviously a monotonic curve. Among them, the common Rank-1 and Top1-Acc metric refer to CMC(1) + +2. mAP + + Assuming that a query sample is used and a set of query results is returned, then according to the following formula, consider the first K query results one by one, and for each K, calculate the precision rate $Precision$ and recall rate $Recall$. 
+ $$\begin{align} precision&=\frac{|\\{同类别图片\\} \cap \\{前K个查询结果\\}|}{|\\{前K个查询结果\\}|} \\\\ recall&=\frac{|\\{同类别图片\\} \cap \\{前K个查询结果\\}|}{|\\{同类别图片\\}|} \end{align}$$ + The obtained multiple groups (Precision, Recall) are converted into a curve graph, and the area enclosed by the curve and the coordinate axis is called Average Precision (AP), + For each sample, calculate its AP value, and then take the average to get the mAP. +### 3. ReID algorithm + +#### 3.1 ReID strong-baseline + +Paper source: [Bag of Tricks and A Strong Baseline for Deep Person Re-identification](https://openaccess.thecvf.com/content_CVPRW_2019/papers/TRMTMCT/Luo_Bag_of_Tricks_and_a_Strong_Baseline_for_Deep_Person_CVPRW_2019_paper.pdf) + + + +##### 3.1.1 Principle introduction + +Based on the commonly used person re-identification model based on ResNet50, the author explores and summarizes the following effective and applicable optimization methods, which greatly improves the indicators on multiple person re-identification datasets. + +1. Warmup: At the beginning of training, let the learning rate gradually increase from a small value and then start to decrease, which is conducive to the stability of gradient descent optimization, so as to find a better parameter model. +2. Random erasing augmentation: Random area erasing, which improves the generalization ability of the model through data augmentation. +3. Label smoothing: Label smoothing to improve the generalization ability of the model. +4. Last stride=1: Set the downsampling of the last stage of the feature extraction module to 1, increase the resolution of the output feature map to retain more details and improve the classification ability of the model. +5. BNNeck: Before the feature vector is input to the classification head, it goes through BNNeck, so that the feature obeys the normal distribution on the surface of the hypersphere, which reduces the difficulty of optimizing IDLoss and TripLetLoss at the same time. +6. Center loss: Give each category a learnable cluster center, and make the intra-class features close to the cluster center during training to reduce intra-class differences and increase inter-class differences. +7. Reranking: Consider the neighbor candidates of the query image during retrieval, optimize the distance matrix according to whether the neighbor images of the candidate object also contain the query image, and finally improve the retrieval accuracy. 
+ +##### 3.1.2 Accuracy metrics + +The following table summarizes the accuracy metrics of the 3 configurations of the recurring ReID strong-baseline on the Market1501 dataset, + +| configuration file | recall@1(\%) | mAP(\%) | reference recall@1(\%) | reference mAP(\%) | pretrained model download address | inference model download address | +| ------------------ | ------------ | ------- | ---------------------- | ----------------- | --------------------------------- | -------------------------------- | +| baseline.yaml | 88.45 | 74.37 | 87.7 | 74.0 | [download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/baseline_pretrained.pdparams) | [ Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/baseline_infer.tar) | +| softmax_triplet.yaml | 94.29 | 85.57 | 94.1 | 85.7 | [download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_pretrained.pdparams) | [ Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/softmax_triplet_infer.tar) | +| softmax_triplet_with_center.yaml | 94.50 | 85.82 | 94.5 | 85.9 | [Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_with_center_pretrained.pdparams) | [ Download link](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/softmax_triplet_with_center_infer.tar) | + +Note: The above reference indicators are obtained by using the author's open source code to train on our equipment for many times. Due to different system environment, torch version, CUDA version and other reasons, there may be slight differences with the indicators provided by the author. + +Next, we mainly take the `softmax_triplet_with_center.yaml` configuration and trained model file as an example to show the process of training, testing, and inference on the Market1501 dataset. + +##### 3.1.3 Data Preparation + +Download the [Market-1501-v15.09.15.zip](https://pan.baidu.com/s/1ntIi2Op?_at_=1654142245770) dataset, extract it to `PaddleClas/dataset/`, and organize it into the following file structure : + + ```shell + PaddleClas/dataset/market1501 + └── Market-1501-v15.09.15/ + ├── bounding_box_test/ # gallery set pictures + ├── bounding_box_train/ # training set image + ├── gt_bbox/ + ├── gt_query/ + ├── query/ # query set image + ├── generate_anno.py + ├── bounding_box_test.txt # gallery set path + ├── bounding_box_train.txt # training set path + ├── query.txt # query set path + └── readme.txt + ``` + +##### 3.1.4 Model training + +1. Execute the following command to start training + + Single card training: + ```shell + python3.7 tools/train.py -c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml + ``` + + Doka training: + + For multi-card training, you need to modify the sampler field of the training configuration to adapt to distributed training, as follows: + ```yaml + sampler: + name: PKSampler + batch_size: 64 + sample_per_id: 4 + drop_last: False + sample_method: id_avg_prob + shuffle: True + ``` + Then execute the following command: + ```shell + export CUDA_VISIBLE_DEVICES=0,1,2,3 + python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" tools/train.py \ + -c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml + ``` + Note: Single card training takes about 1 hour. + +2. 
View training logs and saved model parameter files + + During the training process, indicator information such as loss will be printed on the screen in real time, and the log file `train.log`, model parameter file `*.pdparams`, optimizer parameter file `*.pdopt` and other contents will be saved to `Global.output_dir` `Under the specified folder, the default is under the `PaddleClas/output/RecModel/` folder. + +### 4. Model evaluation and inference deployment + +#### 4.1 Model Evaluation + +Prepare the `*.pdparams` model parameter file for evaluation. You can use the trained model or the model saved in [2.1.4 Model training] (#214-model training). + +- Take the `latest.pdparams` saved during training as an example, execute the following command to evaluate. + + ```shell + python3.7 tools/eval.py \ + -c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="./output/RecModel/latest" + ``` + +- Take the trained model as an example, download [softmax_triplet_with_center_pretrained.pdparams](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_with_center_pretrained.pdparams) to `PaddleClas/ In the pretrained_models` folder, execute the following command to evaluate. + + ```shell + # download model + cd PaddleClas + mkdir pretrained_models + cd pretrained_models + wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_with_center_pretrained.pdparams + cd.. + # Evaluate + python3.7 tools/eval.py \ + -c ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="pretrained_models/softmax_triplet_with_center_pretrained" + ``` + Note: The address filled after `pretrained_model` does not need to be suffixed with `.pdparams`, it will be added automatically when the program is running. + +- View output results + ```log + ... + ... + ppcls INFO: unique_endpoints {''} + ppcls INFO: Found /root/.paddleclas/weights/resnet50-19c8e357_torch2paddle.pdparams + ppcls INFO: gallery feature calculation process: [0/125] + ppcls INFO: gallery feature calculation process: [20/125] + ppcls INFO: gallery feature calculation process: [40/125] + ppcls INFO: gallery feature calculation process: [60/125] + ppcls INFO: gallery feature calculation process: [80/125] + ppcls INFO: gallery feature calculation process: [100/125] + ppcls INFO: gallery feature calculation process: [120/125] + ppcls INFO: Build gallery done, all feat shape: [15913, 2048], begin to eval.. + ppcls INFO: query feature calculation process: [0/27] + ppcls INFO: query feature calculation process: [20/27] + ppcls INFO: Build query done, all feat shape: [3368, 2048], begin to eval.. + ppcls INFO: re_ranking=False + ppcls INFO: [Eval][Epoch 0][Avg]recall1: 0.94507, recall5: 0.98248, mAP: 0.85827 + ``` + The default evaluation log is saved in `PaddleClas/output/RecModel/eval.log`. You can see that the evaluation indicators of the `softmax_triplet_with_center_pretrained.pdparams` model provided by us on the Market1501 dataset are recall@1=0.94507, recall@5=0.98248 , mAP=0.85827 + +- use the re-ranking option to improve the evaluation metrics + + The main idea of ​​re-ranking is to use the relationship between the retrieval libraries to further optimize the retrieval results, and the k-reciprocal algorithm is widely used. Turn on re-ranking during evaluation in PaddleClas to improve the final retrieval accuracy. 
+ This can be enabled by adding `-o Global.re_ranking=True` to the evaluation command as shown below. + ```bash + python3.7 tools/eval.py \ + -c ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="pretrained_models/softmax_triplet_with_center_pretrained" \ + -o Global.re_ranking=True + ``` + + View the output + ```log + ... + ... + ppcls INFO: unique_endpoints {''} + ppcls INFO: Found /root/.paddleclas/weights/resnet50-19c8e357_torch2paddle.pdparams + ppcls INFO: gallery feature calculation process: [0/125] + ppcls INFO: gallery feature calculation process: [20/125] + ppcls INFO: gallery feature calculation process: [40/125] + ppcls INFO: gallery feature calculation process: [60/125] + ppcls INFO: gallery feature calculation process: [80/125] + ppcls INFO: gallery feature calculation process: [100/125] + ppcls INFO: gallery feature calculation process: [120/125] + ppcls INFO: Build gallery done, all feat shape: [15913, 2048], begin to eval.. + ppcls INFO: query feature calculation process: [0/27] + ppcls INFO: query feature calculation process: [20/27] + ppcls INFO: Build query done, all feat shape: [3368, 2048], begin to eval.. + ppcls INFO: re_ranking=True + ppcls WARNING: re_ranking=True, Recallk.descending has been set to False + ppcls WARNING: re_ranking=True,mAP.descending has been set to False + ppcls INFO: using GPU to compute original distance + ppcls INFO: starting re_ranking + ppcls INFO: [Eval][Epoch 0][Avg]recall1: 0.95546, recall5: 0.97743, mAP: 0.94252 + ``` + It can be seen that after re-ranking is enabled, the evaluation indicators are recall@1=0.95546, recall@5=0.97743, and mAP=0.94252. It can be found that the algorithm improves the mAP indicator significantly (0.85827->0.94252). + + **Note**: The computational complexity of re-ranking is currently high, so it is not enabled by default. + +#### 4.2 Model Inference + +##### 4.2.1 Inference model preparation + +You can convert the model file saved during training into an inference model and inference, or use the converted inference model we provide for direct inference + - Convert the model file saved during the training process to an inference model, also take `latest.pdparams` as an example, execute the following command to convert + ```shell + python3.7 tools/export_model.py \ + -c ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="output/RecModel/latest" \ + -o Global.save_inference_dir="./deploy/softmax_triplet_with_center_infer" + ``` + + - Or download and unzip the inference model we provide + ```shell + cd PaddleClas/deploy + wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/softmax_triplet_with_center_infer.tar + tar xf softmax_triplet_with_center_infer.tar + cd ../ + ``` + +##### 4.2.2 Inference based on Python prediction engine + + 1. 
Modify `PaddleClas/deploy/configs/inference_rec.yaml`- Change the path segment after `infer_imgs:` to any image path under the query folder in Market1501 (the configuration below uses the path of the `0294_c1s1_066631_00.jpg` image) + - Change the field after `rec_inference_model_dir:` to the decompressed softmax_triplet_with_center_infer folder path + - Change the preprocessing configuration under the `transform_ops:` field to the preprocessing configuration under `Eval.Query.dataset` in `softmax_triplet_with_center.yaml` + + ```yaml + Global: + infer_imgs: "../dataset/market1501/Market-1501-v15.09.15/query/0294_c1s1_066631_00.jpg" + rec_inference_model_dir: "./softmax_triplet_with_center_infer" + batch_size: 1 + use_gpu: False + enable_mkldnn: True + cpu_num_threads: 10 + enable_benchmark: False + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + + RecPreProcess: + transform_ops: + -ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + + RecPostProcess: null + ``` + + 2. Execute the inference command + + ```shell + cd PaddleClas/deploy/ + python3.7 python/predict_rec.py -c ./configs/inference_rec.yaml + ``` + + 3. Check the output result, the actual result is a vector of length 2048, which represents the feature vector obtained after the input image is transformed by the model + + ```log + 0294_c1s1_066631_00.jpg: [ 0.01806974 0.00476423 -0.00508293 ... 0.03925538 0.00377574 + -0.00849029] + ``` + The output vector for inference is stored in the `result_dict` variable in [predict_rec.py](../../../deploy/python/predict_rec.py#L134-L135). + + 4. For batch prediction, change the path after `infer_imgs:` in the configuration file to a folder, such as `../dataset/market1501/Market-1501-v15.09.15/query`, it will predict and output queries one by one The feature vectors of all the images below. + +##### 4.2.3 Inference based on C++ prediction engine + +PaddleClas provides an example of inference based on the C++ prediction engine, you can refer to [Server-side C++ prediction](../inference_deployment/cpp_deploy.md) to complete the corresponding inference deployment. If you are using the Windows platform, you can refer to the Visual Studio 2019 Community CMake Compilation Guide to complete the corresponding prediction library compilation and model prediction work. + +#### 4.3 Service deployment + +Paddle Serving provides high-performance, flexible and easy-to-use industrial-grade online inference services. Paddle Serving supports RESTful, gRPC, bRPC and other protocols, and provides inference solutions in a variety of heterogeneous hardware and operating system environments. For more introduction to Paddle Serving, please refer to the Paddle Serving code repository. + +PaddleClas provides an example of model serving deployment based on Paddle Serving. You can refer to [Model serving deployment](../inference_deployment/paddle_serving_deploy.md) to complete the corresponding deployment. + +#### 4.4 Lite deployment + +Paddle Lite is a high-performance, lightweight, flexible and easily extensible deep learning inference framework, positioned to support multiple hardware platforms including mobile, embedded and server. For more introduction to Paddle Lite, please refer to the Paddle Lite code repository. + +PaddleClas provides an example of deploying models based on Paddle Lite. 
You can refer to [Deployment](../inference_deployment/paddle_lite_deploy.md) to complete the corresponding deployment. + +#### 4.5 Paddle2ONNX Model Conversion and Prediction + +Paddle2ONNX supports converting PaddlePaddle model format to ONNX model format. The deployment of Paddle models to various inference engines can be completed through ONNX, including TensorRT/OpenVINO/MNN/TNN/NCNN, and other inference engines or hardware that support the ONNX open source format. For more information about Paddle2ONNX, please refer to the Paddle2ONNX code repository. + +PaddleClas provides an example of converting an inference model to an ONNX model and making inference prediction based on Paddle2ONNX. You can refer to [Paddle2ONNX model conversion and prediction](../../../deploy/paddle2onnx/readme.md) to complete the corresponding deployment work. + +### 5. Summary + +#### 5.1 Method summary and comparison + +The above algorithm can be quickly migrated to most ReID models, which can further improve the performance of ReID models. + +#### 5.2 Usage advice/FAQ + +The Market1501 dataset is relatively small, so you can try to train multiple times to get the highest accuracy. + +### 6. References + +1. [Bag of Tricks and A Strong Baseline for Deep Person Re-identification](https://openaccess.thecvf.com/content_CVPRW_2019/papers/TRMTMCT/Luo_Bag_of_Tricks_and_a_Strong_Baseline_for_Deep_Person_CVPRW_2019_paper.pdf) +2. [michuanhaohao/reid-strong-baseline](https://github.com/michuanhaohao/reid-strong-baseline) +3. [Pedestrian Re-ID dataset Market1501 dataset _star_function blog-CSDN blog _market1501 dataset](https://blog.csdn.net/qq_39220334/article/details/121470106) +4. [Deep Learning for Person Re-identification: A Survey and Outlook](https://arxiv.org/abs/2001.04193) +5. [CMC and mAP in ReID Task](https://wrong.wang/blog/20190223-reid%E4%BB%BB%E5%8A%A1%E4%B8%AD%E7%9A%84cmc%E5%92%8Cmap/) diff --git a/docs/en/faq_series/faq_2022_s1_en.md b/docs/en/faq_series/faq_2022_s1_en.md new file mode 100644 index 0000000000000000000000000000000000000000..daf3350c09258fc17b2469822b703168c211c8ff --- /dev/null +++ b/docs/en/faq_series/faq_2022_s1_en.md @@ -0,0 +1,56 @@ +# PaddleClas FAQ Summary - 2022 Season 1 + +## Before You Read + +- We collect some frequently asked questions in issues and user groups since PaddleClas is open-sourced and provide brief answers, aiming to give some reference for the majority to save you from twists and turns. +- There are many talents in the field of image classification, recognition and retrieval with quickly updated models and papers, and the answers here mainly rely on our limited project practice, so it is not possible to cover all facets. We sincerely hope that the man of insight will help to supplement and correct the content, thanks a lot. + +## Catalogue + +- [1. Theory](#1-theory) +- [2. Practice](#2-actual-combat) + - [2.1 Common problems of training and evaluation](#21-common-problems-of-training-and-evaluation) + - [Q2.1.1 How to freeze the parameters of some layers during training?](#q211-how-to-freeze-the-parameters-of-some-layers-during-training) + + +## 1. Theory + + +## 2. Practice + + +### 2.1 Common problems of training and evaluation + +#### Q2.1.1 How to freeze the parameters of some layers during training? +**A**: There are currently three methods available +1. Manually modify the model code, use `paddle.ParamAttr(learning_rate=0.0)`, and set the learning rate of the frozen layer to 0.0. 
For details, see [paddle.ParamAttr documentation](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/api/paddle/ParamAttr_en.html#paramattr). The following code can set the learning rate of the weight parameter of the self.conv layer to 0.0. + ```python + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=0.0), # <-- set here + bias_attr=False, + data_format=data_format) + ``` + +2. Manually set stop_gradient=True for the frozen layer, please refer to [this link](https://github.com/RainFrost1/PaddleClas/blob/24e968b8d9f7d9e2309e713cbf2afe8fda9deacd/ppcls/engine/train/train_idml.py#L40-L66). When using this method, after the gradient is returned to the layer which set strop_gradient=True, the gradient backward is stopped, that is, the weight of the previous layer will be fixed. + +3. After loss.backward() and before optimizer.step(), use the clear_gradients() method in nn.Layer. For the layer to be fixed, call this method without affecting the loss return. The following code can clear the gradient of a layer or the gradient of a parameter of a layer + ```python + import paddle + linear = paddle.nn.Linear(3, 4) + x = paddle.randn([4, 3]) + y = linear(x) + loss = y.sum().backward() + + print(linear.weight.grad) + print(linear.bias.grad) + linear.clear_gradients() # clear the gradients of the entire layer + # linear.weight.clear_grad() # Only clear the gradient of the weight parameter of the Linear layer + print(linear.weight.grad) + print(linear.bias.grad) + ``` diff --git a/docs/en/inference_deployment/classification_serving_deploy_en.md b/docs/en/inference_deployment/classification_serving_deploy_en.md new file mode 100644 index 0000000000000000000000000000000000000000..120871edddbe1ca7b6ac1b3a72a3e89e8d1de39a --- /dev/null +++ b/docs/en/inference_deployment/classification_serving_deploy_en.md @@ -0,0 +1,239 @@ +English | [简体中文](../../zh_CN/inference_deployment/classification_serving_deploy.md) + +# Classification model service deployment + +## Table of contents + +- [1 Introduction](#1-introduction) +- [2. Serving installation](#2-serving-installation) +- [3. Image Classification Service Deployment](#3-image-classification-service-deployment) + - [3.1 Model conversion](#31-model-conversion) + - [3.2 Service deployment and request](#32-service-deployment-and-request) + - [3.2.1 Python Serving](#321-python-serving) + - [3.2.2 C++ Serving](#322-c-serving) + + +## 1 Introduction + +[Paddle Serving](https://github.com/PaddlePaddle/Serving) aims to help deep learning developers easily deploy online prediction services, support one-click deployment of industrial-grade service capabilities, high concurrency between client and server Efficient communication and support for developing clients in multiple programming languages. + +This section takes the HTTP prediction service deployment as an example to introduce how to use PaddleServing to deploy the model service in PaddleClas. Currently, only Linux platform deployment is supported, and Windows platform is not currently supported. + + +## 2. Serving installation + +The Serving official website recommends using docker to install and deploy the Serving environment. First, you need to pull the docker environment and create a Serving-based docker. 
+ +```shell +# start GPU docker +docker pull paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel +nvidia-docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel bash +nvidia-docker exec -it test bash + +# start CPU docker +docker pull paddlepaddle/serving:0.7.0-devel +docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-devel bash +docker exec -it test bash +``` + +After entering docker, you need to install Serving-related python packages. +```shell +python3.7 -m pip install paddle-serving-client==0.7.0 +python3.7 -m pip install paddle-serving-app==0.7.0 +python3.7 -m pip install faiss-cpu==1.7.1post2 + +#If it is a CPU deployment environment: +python3.7 -m pip install paddle-serving-server==0.7.0 #CPU +python3.7 -m pip install paddlepaddle==2.2.0 # CPU + +#If it is a GPU deployment environment +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post102 # GPU with CUDA10.2 + TensorRT6 +python3.7 -m pip install paddlepaddle-gpu==2.2.0 # GPU with CUDA10.2 + +#Other GPU environments need to confirm the environment and then choose which one to execute +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post101 # GPU with CUDA10.1 + TensorRT6 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post112 # GPU with CUDA11.2 + TensorRT8 +``` + +* If the installation speed is too slow, you can change the source through `-i https://pypi.tuna.tsinghua.edu.cn/simple` to speed up the installation process. +* For other environment configuration installation, please refer to: [Install Paddle Serving with Docker](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_EN.md) + + + +## 3. Image Classification Service Deployment + +The following takes the classic ResNet50_vd model as an example to introduce how to deploy the image classification service. + + +### 3.1 Model conversion + +When using PaddleServing for service deployment, you need to convert the saved inference model into a Serving model. +- Go to the working directory: + ```shell + cd deploy/paddleserving + ``` +- Download and unzip the inference model for ResNet50_vd: + ```shell + # Download ResNet50_vd inference model + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar + # Decompress the ResNet50_vd inference model + tar xf ResNet50_vd_infer.tar + ``` +- Use the paddle_serving_client command to convert the downloaded inference model into a model format for easy server deployment: + ```shell + # Convert ResNet50_vd model + python3.7 -m paddle_serving_client.convert \ + --dirname ./ResNet50_vd_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./ResNet50_vd_serving/ \ + --serving_client ./ResNet50_vd_client/ + ``` + The specific meaning of the parameters in the above command is shown in the following table + | parameter | type | default value | description | + | --------- | ---- | ------------- | ----------- | |--- | + | `dirname` | str | - | The storage path of the model file to be converted. The program structure file and parameter file are saved in this directory. | + | `model_filename` | str | None | The name of the file storing the model Inference Program structure that needs to be converted. If set to None, use `__model__` as the default filename | + | `params_filename` | str | None | File name where all parameters of the model to be converted are stored. It needs to be specified if and only if all model parameters are stored in a single binary file. 
If the model parameters are stored in separate files, set it to None |
+  | `serving_server` | str | `"serving_server"` | The storage path of the converted model files and configuration files. Default is serving_server |
+  | `serving_client` | str | `"serving_client"` | The converted client configuration file storage path. Default is serving_client |
+
+  After the ResNet50_vd inference model conversion is completed, there will be additional `ResNet50_vd_serving` and `ResNet50_vd_client` folders in the current folder, with the following structure:
+  ```shell
+  ├── ResNet50_vd_serving/
+  │   ├── inference.pdiparams
+  │   ├── inference.pdmodel
+  │   ├── serving_server_conf.prototxt
+  │   └── serving_server_conf.stream.prototxt
+  │
+  └── ResNet50_vd_client/
+      ├── serving_client_conf.prototxt
+      └── serving_client_conf.stream.prototxt
+  ```
+
+- Serving provides the function of input and output renaming in order to be compatible with the deployment of different models. When a different model is deployed for inference, you only need to modify the `alias_name` in the configuration file, and the inference deployment can be completed without modifying the code. Therefore, after the conversion, you need to modify the alias names in the `serving_server_conf.prototxt` files under `ResNet50_vd_serving` and `ResNet50_vd_client` respectively, changing the `alias_name` in `fetch_var` to `prediction`. The modified serving_server_conf.prototxt is shown below:
+  ```log
+  feed_var {
+    name: "inputs"
+    alias_name: "inputs"
+    is_lod_tensor: false
+    feed_type: 1
+    shape: 3
+    shape: 224
+    shape: 224
+  }
+  fetch_var {
+    name: "save_infer_model/scale_0.tmp_1"
+    alias_name: "prediction"
+    is_lod_tensor: false
+    fetch_type: 1
+    shape: 1000
+  }
+  ```
+
+### 3.2 Service deployment and request
+
+The paddleserving directory contains the code for starting the pipeline service and the C++ Serving service, and for sending prediction requests, mainly including:
+```shell
+__init__.py
+classification_web_service.py # Script to start the pipeline server
+config.yml                    # Configuration file to start the pipeline service
+pipeline_http_client.py       # Script for sending pipeline prediction requests in http mode
+pipeline_rpc_client.py        # Script for sending pipeline prediction requests in rpc mode
+readme.md                     # Classification model service deployment document
+run_cpp_serving.sh            # Script to start the C++ Serving deployment
+test_cpp_serving_client.py    # Script for sending C++ Serving prediction requests in rpc mode
+```
+
+#### 3.2.1 Python Serving
+
+- Start the service:
+  ```shell
+  # Start the service and save the running log in log.txt
+  python3.7 classification_web_service.py &>log.txt &
+  ```
+
+- Send a request:
+  ```shell
+  # send service request
+  python3.7 pipeline_http_client.py
+  ```
+  After a successful run, the results of the model prediction will be printed in the cmd window, as follows:
+  ```log
+  {'err_no': 0, 'err_msg': '', 'key': ['label', 'prob'], 'value': ["['daisy']", '[0.9341402053833008]'], 'tensors': []}
+  ```
+- Shut down the service:
+  If the service program is running in the foreground, you can press `Ctrl+C` to terminate the server program; if it is running in the background, you can use the kill command to close related processes, or you can execute the following command in the path where the service program was started to terminate the server program:
+  ```bash
+  python3.7 -m paddle_serving_server.serve stop
+  ```
+  After the execution is completed, the `Process stopped` message appears, indicating that the
service was successfully shut down.
+
+
+#### 3.2.2 C++ Serving
+
+Unlike Python Serving, the C++ Serving client calls C++ OPs to predict, so before starting the service you need to compile and install the Serving server package and set `SERVING_BIN`.
+
+- Compile and install the Serving server package
+  ```shell
+  # Enter the working directory
+  cd PaddleClas/deploy/paddleserving
+  # One-click compile and install Serving server, set SERVING_BIN
+  source ./build_server.sh python3.7
+  ```
+  **Note:** The paths set in [build_server.sh](./build_server.sh#L55-L62) may need to be modified according to the actual machine environment, such as the CUDA and python versions, before compiling.
+
+- Modify the client file `ResNet50_client/serving_client_conf.prototxt`: change the field after `feed_type:` to 20, change the field after the first `shape:` to 1, and delete the rest of the `shape` fields.
+  ```log
+  feed_var {
+    name: "inputs"
+    alias_name: "inputs"
+    is_lod_tensor: false
+    feed_type: 20
+    shape: 1
+  }
+  ```
+- Modify part of the code of [`test_cpp_serving_client`](./test_cpp_serving_client.py)
+  1. Modify the [`feed={"inputs": image}`](./test_cpp_serving_client.py#L28) part of the code, and change the path after `load_client_config` to `ResNet50_client/serving_client_conf.prototxt`.
+  2. Modify the [`feed={"inputs": image}`](./test_cpp_serving_client.py#L45) part of the code, and change `inputs` to match the `name` of the `feed_var` field in `ResNet50_client/serving_client_conf.prototxt`. Since `name` in some model client files is `x` instead of `inputs`, you need to pay attention to this when using these models for C++ Serving deployment.
+
+- Start the service:
+  ```shell
+  # Start the service; the service runs in the background, and the running log is saved in nohup.txt
+  # CPU deployment
+  sh run_cpp_serving.sh
+  # GPU deployment, specifying card 0
+  sh run_cpp_serving.sh 0
+  ```
+
+- Send a request:
+  ```shell
+  # send service request
+  python3.7 test_cpp_serving_client.py
+  ```
+  After a successful run, the results of the model prediction will be printed in the cmd window, as follows:
+  ```log
+  prediction: daisy, probability: 0.9341399073600769
+  ```
+- Shut down the service:
+  If the service program is running in the foreground, you can press `Ctrl+C` to terminate the server program; if it is running in the background, you can use the kill command to close related processes, or you can execute the following command in the path where the service program was started to terminate the server program:
+  ```bash
+  python3.7 -m paddle_serving_server.serve stop
+  ```
+  After the execution is completed, the `Process stopped` message appears, indicating that the service was successfully shut down.
+
+## 4. FAQ
+
+**Q1**: No result is returned after the request is sent, or an output decoding error is prompted
+
+**A1**: Do not set a proxy when starting the service or sending requests. Close the proxy before starting the service and before sending requests.
The command to close the proxy is: +```shell +unset https_proxy +unset http_proxy +``` + +**Q2**: nothing happens after starting the service + +**A2**: You can check whether the path corresponding to `model_config` in `config.yml` exists, and whether the folder name is correct + +For more service deployment types, such as `RPC prediction service`, you can refer to Serving's [github official website](https://github.com/PaddlePaddle/Serving/tree/v0.9.0/examples) diff --git a/docs/en/inference_deployment/paddle_hub_serving_deploy_en.md b/docs/en/inference_deployment/paddle_hub_serving_deploy_en.md index c89142911f12ffcb2622fb8b5912cd9c960e56c4..4dddc94bd8456a882e42000b640870155f46da7c 100644 --- a/docs/en/inference_deployment/paddle_hub_serving_deploy_en.md +++ b/docs/en/inference_deployment/paddle_hub_serving_deploy_en.md @@ -1,11 +1,10 @@ -# Service deployment based on PaddleHub Serving +English | [简体中文](../../zh_CN/inference_deployment/paddle_hub_serving_deploy.md) -PaddleClas supports rapid service deployment through Paddlehub. At present, it supports the deployment of image classification. Please look forward to the deployment of image recognition. +# Service deployment based on PaddleHub Serving ---- +PaddleClas supports rapid service deployment through PaddleHub. Currently, the deployment of image classification is supported. Please look forward to the deployment of image recognition. ## Catalogue - - [1. Introduction](#1) - [2. Prepare the environment](#2) - [3. Download inference model](#3) @@ -16,97 +15,101 @@ PaddleClas supports rapid service deployment through Paddlehub. At present, it s - [6. Send prediction requests](#6) - [7. User defined service module modification](#7) + -## 1. Introduction +## 1 Introduction -HubServing service pack contains 3 files, the directory is as follows: +The hubserving service deployment configuration service package `clas` contains 3 required files, the directories are as follows: +```shell +deploy/hubserving/clas/ +├── __init__.py # Empty file, required +├── config.json # Configuration file, optional, passed in as a parameter when starting the service with configuration +├── module.py # The main module, required, contains the complete logic of the service +└── params.py # Parameter file, required, including model path, pre- and post-processing parameters and other parameters ``` -hubserving/clas/ - └─ __init__.py Empty file, required - └─ config.json Configuration file, optional, passed in as a parameter when using configuration to start the service - └─ module.py Main module file, required, contains the complete logic of the service - └─ params.py Parameter file, required, including parameters such as model path, pre- and post-processing parameters -``` + ## 2. Prepare the environment - ```shell -# Install version 2.0 of PaddleHub -pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple +# Install paddlehub, version 2.1.0 is recommended +python3.7 -m pip install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple ``` + -## 3. Download inference model +## 3. Download the inference model -Before installing the service module, you need to prepare the inference model and put it in the correct path. The default model path is: +Before installing the service module, you need to prepare the inference model and put it in the correct path. 
The default model path is: -* Model structure file: `PaddleClas/inference/inference.pdmodel` -* Model parameters file: `PaddleClas/inference/inference.pdiparams` +* Classification inference model structure file: `PaddleClas/inference/inference.pdmodel` +* Classification inference model weight file: `PaddleClas/inference/inference.pdiparams` **Notice**: -* The model file path can be viewed and modified in `PaddleClas/deploy/hubserving/clas/params.py`. -* It should be noted that the prefix of model structure file and model parameters file must be `inference`. -* More models provided by PaddleClas can be obtained from the [model library](../algorithm_introduction/ImageNet_models_en.md). You can also use models trained by yourself. +* Model file paths can be viewed and modified in `PaddleClas/deploy/hubserving/clas/params.py`: + + ```python + "inference_model_dir": "../inference/" + ``` +* Model files (including `.pdmodel` and `.pdiparams`) must be named `inference`. +* We provide a large number of pre-trained models based on the ImageNet-1k dataset. For the model list and download address, see [Model Library Overview](../algorithm_introduction/ImageNet_models.md), or you can use your own trained and converted models. + -## 4. Install Service Module +## 4. Install the service module -* On Linux platform, the examples are as follows. -```shell -cd PaddleClas/deploy -hub install hubserving/clas/ -``` +* In the Linux environment, the installation example is as follows: + ```shell + cd PaddleClas/deploy + # Install the service module: + hub install hubserving/clas/ + ``` + +* In the Windows environment (the folder separator is `\`), the installation example is as follows: + + ```shell + cd PaddleClas\deploy + # Install the service module: + hub install hubserving\clas\ + ``` -* On Windows platform, the examples are as follows. -```shell -cd PaddleClas\deploy -hub install hubserving\clas\ -``` ## 5. Start service + ### 5.1 Start with command line parameters -This method only supports CPU. Command as follow: +This method only supports prediction using CPU. Start command: ```shell -$ hub serving start --modules Module1==Version1 \ - --port XXXX \ - --use_multiprocess \ - --workers \ -``` - -**parameters:** - -|parameters|usage| -|-|-| -|--modules/-m|PaddleHub Serving pre-installed model, listed in the form of multiple Module==Version key-value pairs
*`When Version is not specified, the latest version is selected by default`*| -|--port/-p|Service port, default is 8866| -|--use_multiprocess|Enable concurrent mode, the default is single-process mode, this mode is recommended for multi-core CPU machines
*`Windows operating system only supports single-process mode`*| -|--workers|The number of concurrent tasks specified in concurrent mode, the default is `2*cpu_count-1`, where `cpu_count` is the number of CPU cores| - -For example, start service: - -```shell -hub serving start -m clas_system -``` +hub serving start \ +--modules clas_system +--port 8866 +``` +This completes the deployment of a serviced API, using the default port number 8866. -This completes the deployment of a service API, using the default port number 8866. +**Parameter Description**: +|parameters|uses| +|-|-| +|--modules/-m| [**required**] PaddleHub Serving pre-installed model, listed in the form of multiple Module==Version key-value pairs
*`When no Version is specified, the latest version is selected by default`*|
+|--port/-p| [**Optional**] Service port, default is 8866|
+|--use_multiprocess| [**Optional**] Whether to enable concurrent mode; the default is single-process mode. Concurrent mode is recommended for multi-core CPU machines
*`Windows operating system only supports single-process mode`*|
+|--workers| [**Optional**] The number of concurrent tasks specified in concurrent mode; the default is `2*cpu_count-1`, where `cpu_count` is the number of CPU cores|
+For more deployment details, see [PaddleHub Serving Model One-Click Service Deployment](https://paddlehub.readthedocs.io/zh_CN/release-v2.1/tutorial/serving.html)

### 5.2 Start with configuration file

-This method supports CPU and GPU. Command as follow:
+This method supports prediction using CPU or GPU. Start command:

```shell
-hub serving start --config/-c config.json
-```
+hub serving start -c config.json
+```

-Wherein, the format of `config.json` is as follows:
+The format of `config.json` is as follows:

```json
{
@@ -127,18 +130,19 @@ Wherein, the format of `config.json` is as follows:
}
```

-- The configurable parameters in `init_args` are consistent with the `_initialize` function interface in `module.py`. Among them,
- - when `use_gpu` is `true`, it means that the GPU is used to start the service.
- - when `enable_mkldnn` is `true`, it means that use MKL-DNN to accelerate.
-- The configurable parameters in `predict_args` are consistent with the `predict` function interface in `module.py`.
+**Parameter Description**:
+* The configurable parameters in `init_args` are consistent with the `_initialize` function interface in `module.py`. Among them,
+  - When `use_gpu` is `true`, the GPU is used to start the service.
+  - When `enable_mkldnn` is `true`, MKL-DNN acceleration is used.
+* The configurable parameters in `predict_args` are consistent with the `predict` function interface in `module.py`.

-**Note:**
-- When using the configuration file to start the service, other parameters will be ignored.
-- If you use GPU prediction (that is, `use_gpu` is set to `true`), you need to set the environment variable CUDA_VISIBLE_DEVICES before starting the service, such as: ```export CUDA_VISIBLE_DEVICES=0```, otherwise you do not need to set it.
-- **`use_gpu` and `use_multiprocess` cannot be `true` at the same time.**
-- **When both `use_gpu` and `enable_mkldnn` are set to `true` at the same time, GPU is used to run and `enable_mkldnn` will be ignored.**
+**Notice**:
+* When using the configuration file to start the service, the parameter settings in the configuration file will be used, and other command line parameters will be ignored;
+* If you use GPU prediction (i.e., `use_gpu` is set to `true`), you need to set the `CUDA_VISIBLE_DEVICES` environment variable to specify the GPU card number before starting the service, e.g.: `export CUDA_VISIBLE_DEVICES=0`;
+* **`use_gpu` and `use_multiprocess` cannot both be `true` at the same time**;
+* **When both `use_gpu` and `enable_mkldnn` are `true`, `enable_mkldnn` will be ignored and the GPU will be used.**

-For example, use GPU card No. 3 to start the 2-stage series service:
+For example, to use GPU card No. 3 to start the service:

```shell
cd PaddleClas/deploy
@@ -149,88 +153,86 @@ hub serving start -c hubserving/clas/config.json
## 6.
Send prediction requests -After the service starting, you can use the following command to send a prediction request to obtain the prediction result: +After configuring the server, you can use the following command to send a prediction request to get the prediction result: ```shell cd PaddleClas/deploy -python hubserving/test_hubserving.py server_url image_path +python3.7 hubserving/test_hubserving.py \ +--server_url http://127.0.0.1:8866/predict/clas_system \ +--image_file ./hubserving/ILSVRC2012_val_00006666.JPEG \ +--batch_size 8 +``` +**Predicted output** +```log +The result(s): class_ids: [57, 67, 68, 58, 65], label_names: ['garter snake, grass snake', 'diamondback, diamondback rattlesnake, Crotalus adamanteus', 'sidewinder, horned rattlesnake, Crotalus cerastes' , 'water snake', 'sea snake'], scores: [0.21915, 0.15631, 0.14794, 0.13177, 0.12285] +The average time of prediction cost: 2.970 s/image +The average time cost: 3.014 s/image +The average top-1 score: 0.110 ``` -Two required parameters need to be passed to the script: - -- **server_url**: service address,format of which is -`http://[ip_address]:[port]/predict/[module_name]` -- **image_path**: Test image path, can be a single image path or an image directory path -- **batch_size**: [**Optional**] batch_size. Default by `1`. -- **resize_short**: [**Optional**] In preprocessing, resize according to short size. Default by `256`。 -- **crop_size**: [**Optional**] In preprocessing, centor crop size. Default by `224`。 -- **normalize**: [**Optional**] In preprocessing, whether to do `normalize`. Default by `True`。 -- **to_chw**: [**Optional**] In preprocessing, whether to transpose to `CHW`. Default by `True`。 +**Script parameter description**: +* **server_url**: Service address, the format is `http://[ip_address]:[port]/predict/[module_name]`. +* **image_path**: The test image path, which can be a single image path or an image collection directory path. +* **batch_size**: [**OPTIONAL**] Make predictions in `batch_size` size, default is `1`. +* **resize_short**: [**optional**] When preprocessing, resize by short edge, default is `256`. +* **crop_size**: [**Optional**] The size of the center crop during preprocessing, the default is `224`. +* **normalize**: [**Optional**] Whether to perform `normalize` during preprocessing, the default is `True`. +* **to_chw**: [**Optional**] Whether to adjust to `CHW` order when preprocessing, the default is `True`. -**Notice**: -If you want to use `Transformer series models`, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input size of model, and need to set `--resize_short=384`, `--crop_size=384`. +**Note**: If you use `Transformer` series models, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input data size of the model, you need to specify `--resize_short=384 -- crop_size=384`. 
-**Eg.** +**Return result format description**: +The returned result is a list (list), including the top-k classification results, the corresponding scores, and the time-consuming prediction of this image, as follows: ```shell -python hubserving/test_hubserving.py --server_url http://127.0.0.1:8866/predict/clas_system --image_file ./hubserving/ILSVRC2012_val_00006666.JPEG --batch_size 8 +list: return result +└──list: first image result + ├── list: the top k classification results, sorted in descending order of score + ├── list: the scores corresponding to the first k classification results, sorted in descending order of score + └── float: The image classification time, in seconds ``` -The returned result is a list, including the `top_k`'s classification results, corresponding scores and the time cost of prediction, details as follows. - -``` -list: The returned results -└─ list: The result of first picture - └─ list: The top-k classification results, sorted in descending order of score - └─ list: The scores corresponding to the top-k classification results, sorted in descending order of score - └─ float: The time cost of predicting the picture, unit second -``` -**Note:** If you need to add, delete or modify the returned fields, you can modify the corresponding module. For the details, refer to the user-defined modification service module in the next section. ## 7. User defined service module modification -If you need to modify the service logic, the following steps are generally required: - -1. Stop service -```shell -hub serving stop --port/-p XXXX -``` - -2. Modify the code in the corresponding files, like `module.py` and `params.py`, according to the actual needs. You need re-install(hub install hubserving/clas/) and re-deploy after modifing `module.py`. -After modifying and installing and before deploying, you can use `python hubserving/clas/module.py` to test the installed service module. +If you need to modify the service logic, you need to do the following: -For example, if you need to replace the model used by the deployed service, you need to modify model path parameters `cfg.model_file` and `cfg.params_file` in `params.py`. Of course, other related parameters may need to be modified at the same time. Please modify and debug according to the actual situation. - -3. Uninstall old service module -```shell -hub uninstall clas_system -``` +1. Stop the service + ```shell + hub serving stop --port/-p XXXX + ``` -4. Install modified service module -```shell -hub install hubserving/clas/ -``` +2. Go to the corresponding `module.py` and `params.py` and other files to modify the code according to actual needs. `module.py` needs to be reinstalled after modification (`hub install hubserving/clas/`) and deployed. Before deploying, you can use the `python3.7 hubserving/clas/module.py` command to quickly test the code ready for deployment. -5. Restart service -```shell -hub serving start -m clas_system -``` +3. Uninstall the old service pack + ```shell + hub uninstall clas_system + ``` -**Note**: +4. Install the new modified service pack + ```shell + hub install hubserving/clas/ + ``` -Common parameters can be modified in params.py: -* Directory of model files(include model structure file and model parameters file): - ```python - "inference_model_dir": - ``` -* The number of Top-k results returned during post-processing: - ```python - 'topk': - ``` -* Mapping file corresponding to label and class ID during post-processing: - ```python - 'class_id_map_file': - ``` +5. 
Restart the service + ```shell + hub serving start -m clas_system + ``` -In order to avoid unnecessary delay and be able to predict in batch, the preprocessing (include resize, crop and other) is completed in the client, so modify [test_hubserving.py](../../../deploy/hubserving/test_hubserving.py#L35-L52) if necessary. +**Notice**: +Common parameters can be modified in `PaddleClas/deploy/hubserving/clas/params.py`: + * To replace the model, you need to modify the model file path parameters: + ```python + "inference_model_dir": + ``` + * Change the number of `top-k` results returned when postprocessing: + ```python + 'topk': + ``` + * The mapping file corresponding to the lable and class id when changing the post-processing: + ```python + 'class_id_map_file': + ``` + +In order to avoid unnecessary delay and be able to predict with batch_size, data preprocessing logic (including `resize`, `crop` and other operations) is completed on the client side, so it needs to be in [PaddleClas/deploy/hubserving/test_hubserving.py# L41-L47](../../../deploy/hubserving/test_hubserving.py#L41-L47) and [PaddleClas/deploy/hubserving/test_hubserving.py#L51-L76](../../../deploy/hubserving/test_hubserving.py#L51-L76) Modify the data preprocessing logic related code. diff --git a/docs/en/inference_deployment/paddle_serving_deploy_en.md b/docs/en/inference_deployment/paddle_serving_deploy_en.md deleted file mode 100644 index 7a602920f0271abb35cc532d83af97a98f7a6310..0000000000000000000000000000000000000000 --- a/docs/en/inference_deployment/paddle_serving_deploy_en.md +++ /dev/null @@ -1,280 +0,0 @@ -# Model Service Deployment - -## Catalogue - -- [1. Introduction](#1) -- [2. Installation of Serving](#2) -- [3. Service Deployment for Image Classification](#3) - - [3.1 Model Transformation](#3.1) - - [3.2 Service Deployment and Request](#3.2) -- [4. Service Deployment for Image Recognition](#4) - - [4.1 Model Transformation](#4.1) - - [4.2 Service Deployment and Request](#4.2) -- [5. FAQ](#5) - - -## 1. Introduction - -[Paddle Serving](https://github.com/PaddlePaddle/Serving) is designed to provide easy deployment of on-line prediction services for deep learning developers, it supports one-click deployment of industrial-grade services, highly concurrent and efficient communication between client and server, and multiple programming languages for client development. - -This section, exemplified by HTTP deployment of prediction service, describes how to deploy model services in PaddleClas with PaddleServing. Currently, only deployment on Linux platform is supported. Windows platform is not supported. - - -## 2. Installation of Serving - -It is officially recommended to use docker for the installation and environment deployment of Serving. First, pull the docker and create a Serving-based one. - -``` -docker pull paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel -nvidia-docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel bash -nvidia-docker exec -it test bash -``` - -Once you are in docker, install the Serving-related python packages. 
- -``` -pip3 install paddle-serving-client==0.7.0 -pip3 install paddle-serving-server==0.7.0 # CPU -pip3 install paddle-serving-app==0.7.0 -pip3 install paddle-serving-server-gpu==0.7.0.post102 #GPU with CUDA10.2 + TensorRT6 -# For other GPU environemnt, confirm the environment before choosing which one to execute -pip3 install paddle-serving-server-gpu==0.7.0.post101 # GPU with CUDA10.1 + TensorRT6 -pip3 install paddle-serving-server-gpu==0.7.0.post112 # GPU with CUDA11.2 + TensorRT8 -``` - -- Speed up the installation process by replacing the source with `-i https://pypi.tuna.tsinghua.edu.cn/simple`. -- For other environment configuration and installation, please refer to [Install Paddle Serving using docker](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_EN.md) -- To deploy CPU services, please install the CPU version of serving-server with the following command. - -``` -pip install paddle-serving-server -``` - - -## 3. Service Deployment for Image Classification - - -### 3.1 Model Transformation - -When adopting PaddleServing for service deployment, the saved inference model needs to be converted to a Serving model. The following part takes the classic ResNet50_vd model as an example to introduce the deployment of image classification service. - -- Enter the working directory: - -``` -cd deploy/paddleserving -``` - -- Download the inference model of ResNet50_vd: - -``` -# Download and decompress the ResNet50_vd model -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar -``` - -- Convert the downloaded inference model into a format that is readily deployable by Server with the help of paddle_serving_client. - -``` -# Convert the ResNet50_vd model -python3 -m paddle_serving_client.convert --dirname ./ResNet50_vd_infer/ \ - --model_filename inference.pdmodel \ - --params_filename inference.pdiparams \ - --serving_server ./ResNet50_vd_serving/ \ - --serving_client ./ResNet50_vd_client/ -``` - -After the transformation, `ResNet50_vd_serving` and `ResNet50_vd_client` will be added to the current folder in the following format: - -``` -|- ResNet50_vd_server/ - |- __model__ - |- __params__ - |- serving_server_conf.prototxt - |- serving_server_conf.stream.prototxt -|- ResNet50_vd_client - |- serving_client_conf.prototxt - |- serving_client_conf.stream.prototxt -``` - -Having obtained the model file, modify the alias name in `serving_server_conf.prototxt` under directory `ResNet50_vd_server` by changing `alias_name` in `fetch_var` to `prediction`. - -**Notes**: Serving supports input and output renaming to ensure its compatibility with the deployment of different models. In this case, modifying the alias_name of the configuration file is the only step needed to complete the inference and deployment of all kinds of models. 
The modified serving_server_conf.prototxt is shown below: - -``` -feed_var { - name: "inputs" - alias_name: "inputs" - is_lod_tensor: false - feed_type: 1 - shape: 3 - shape: 224 - shape: 224 -} -fetch_var { - name: "save_infer_model/scale_0.tmp_1" - alias_name: "prediction" - is_lod_tensor: true - fetch_type: 1 - shape: -1 -} -``` - - -### 3.2 Service Deployment and Request - -Paddleserving's directory contains the code to start the pipeline service and send prediction requests, including: - -``` -__init__.py -config.yml # Configuration file for starting the service -pipeline_http_client.py # Script for sending pipeline prediction requests by http -pipeline_rpc_client.py # Script for sending pipeline prediction requests by rpc -classification_web_service.py # Script for starting the pipeline server -``` - -- Start the service: - -``` -# Start the service and the run log is saved in log.txt -python3 classification_web_service.py &>log.txt & -``` - -Once the service is successfully started, a log will be printed in log.txt similar to the following ![img](../../../deploy/paddleserving/imgs/start_server.png) - -- Send request: - -``` -# Send service request -python3 pipeline_http_client.py -``` - -Once the service is successfully started, the prediction results will be printed in the cmd window, see the following example:![img](../../../deploy/paddleserving/imgs/results.png) - - - -## 4. Service Deployment for Image Recognition - -When using PaddleServing for service deployment, the saved inference model needs to be converted to a Serving model. The following part, exemplified by the ultra-lightweight model for image recognition in PP-ShiTu, details the deployment of image recognition service. - - - -## 4.1 Model Transformation - -- Download inference models for general detection and general recognition - -``` -cd deploy -# Download and decompress general recogntion models -wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar -cd models -tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar -# Download and decompress general detection models -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar -tar -xf picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar -``` - -- Convert the inference model for recognition into a Serving model: - -``` -# Convert the recognition model -python3 -m paddle_serving_client.convert --dirname ./general_PPLCNet_x2_5_lite_v1.0_infer/ \ - --model_filename inference.pdmodel \ - --params_filename inference.pdiparams \ - --serving_server ./general_PPLCNet_x2_5_lite_v1.0_serving/ \ - --serving_client ./general_PPLCNet_x2_5_lite_v1.0_client/ -``` - -After the transformation, `general_PPLCNet_x2_5_lite_v1.0_serving/` and `general_PPLCNet_x2_5_lite_v1.0_serving/` will be added to the current folder. Modify the alias name in serving_server_conf.prototxt under the directory `general_PPLCNet_x2_5_lite_v1.0_serving/` by changing `alias_name` to `features` in `fetch_var`. 
The modified serving_server_conf.prototxt is similar to the following: - -``` -feed_var { - name: "x" - alias_name: "x" - is_lod_tensor: false - feed_type: 1 - shape: 3 - shape: 224 - shape: 224 -} -fetch_var { - name: "save_infer_model/scale_0.tmp_1" - alias_name: "features" - is_lod_tensor: true - fetch_type: 1 - shape: -1 -} -``` - -- Convert the inference model for detection into a Serving model: - -``` -# Convert the general detection model -python3 -m paddle_serving_client.convert --dirname ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \ - --model_filename inference.pdmodel \ - --params_filename inference.pdiparams \ - --serving_server ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ \ - --serving_client ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ -``` - -After the transformation, `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` and `picodet_PPLCNet_x2_5_ mainbody_lite_v1.0_client/` will be added to the current folder. - -**Note:** The alias name in the serving_server_conf.prototxt under the directory`picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` requires no modification. - -- Download and decompress the constructed search library index - -``` -cd ../ -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar && tar -xf drink_dataset_v1.0.tar -``` - - -## 4.2 Service Deployment and Request - -**Note:** Since the recognition service involves multiple models, PipeLine is adopted for better performance. This deployment method does not support the windows platform for now. - -- Enter the working directory - -``` -cd ./deploy/paddleserving/recognition -``` - -Paddleserving's directory contains the code to start the pipeline service and send prediction requests, including: - -``` -__init__.py -config.yml # Configuration file for starting the service -pipeline_http_client.py # Script for sending pipeline prediction requests by http -pipeline_rpc_client.py # Script for sending pipeline prediction requests by rpc -recognition_web_service.py # Script for starting the pipeline server -``` - -- Start the service: - -``` -# Start the service and the run log is saved in log.txt -python3 recognition_web_service.py &>log.txt & -``` - -Once the service is successfully started, a log will be printed in log.txt similar to the following ![img](../../../deploy/paddleserving/imgs/start_server_shitu.png) - -- Send request: - -``` -python3 pipeline_http_client.py -``` - -Once the service is successfully started, the prediction results will be printed in the cmd window, see the following example: ![img](../../../deploy/paddleserving/imgs/results_shitu.png) - - - -## 5.FAQ - -**Q1**: After sending a request, no result is returned or the output is prompted with a decoding error. - -**A1**: Please turn off the proxy before starting the service and sending requests, try the following command: - -``` -unset https_proxy -unset http_proxy -``` - -For more types of service deployment, such as `RPC prediction services`, you can refer to the [github official website](https://github.com/PaddlePaddle/Serving/tree/v0.7.0/examples) of Serving. 
diff --git a/docs/en/inference_deployment/recognition_serving_deploy_en.md b/docs/en/inference_deployment/recognition_serving_deploy_en.md new file mode 100644 index 0000000000000000000000000000000000000000..bf8061376a6db8fb6cb8c256c8cc5a74c0fb1326 --- /dev/null +++ b/docs/en/inference_deployment/recognition_serving_deploy_en.md @@ -0,0 +1,282 @@ +English | [简体中文](../../zh_CN/inference_deployment/recognition_serving_deploy.md) + +# Recognition model service deployment + +## Table of contents + +- [1 Introduction](#1-introduction) +- [2. Serving installation](#2-serving-installation) +- [3. Image recognition service deployment](#3-image-recognition-service-deployment) + - [3.1 Model conversion](#31-model-conversion) + - [3.2 Service deployment and request](#32-service-deployment-and-request) + - [3.2.1 Python Serving](#321-python-serving) + - [3.2.2 C++ Serving](#322-c-serving) +- [4. FAQ](#4-faq) + + +## 1 Introduction + +[Paddle Serving](https://github.com/PaddlePaddle/Serving) aims to help deep learning developers easily deploy online prediction services, support one-click deployment of industrial-grade service capabilities, high concurrency between client and server Efficient communication and support for developing clients in multiple programming languages. + +This section takes the HTTP prediction service deployment as an example to introduce how to use PaddleServing to deploy the model service in PaddleClas. Currently, only Linux platform deployment is supported, and Windows platform is not currently supported. + + +## 2. Serving installation + +The Serving official website recommends using docker to install and deploy the Serving environment. First, you need to pull the docker environment and create a Serving-based docker. + +```shell +# start GPU docker +docker pull paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel +nvidia-docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel bash +nvidia-docker exec -it test bash + +# start CPU docker +docker pull paddlepaddle/serving:0.7.0-devel +docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-devel bash +docker exec -it test bash +``` + +After entering docker, you need to install Serving-related python packages. +```shell +python3.7 -m pip install paddle-serving-client==0.7.0 +python3.7 -m pip install paddle-serving-app==0.7.0 +python3.7 -m pip install faiss-cpu==1.7.1post2 + +#If it is a CPU deployment environment: +python3.7 -m pip install paddle-serving-server==0.7.0 #CPU +python3.7 -m pip install paddlepaddle==2.2.0 # CPU + +#If it is a GPU deployment environment +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post102 # GPU with CUDA10.2 + TensorRT6 +python3.7 -m pip install paddlepaddle-gpu==2.2.0 # GPU with CUDA10.2 + +#Other GPU environments need to confirm the environment and then choose which one to execute +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post101 # GPU with CUDA10.1 + TensorRT6 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post112 # GPU with CUDA11.2 + TensorRT8 +``` + +* If the installation speed is too slow, you can change the source through `-i https://pypi.tuna.tsinghua.edu.cn/simple` to speed up the installation process. +* For other environment configuration installation, please refer to: [Install Paddle Serving with Docker](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_CN.md) + + + + +## 3. 
Image recognition service deployment
+
+When using PaddleServing for image recognition service deployment, **you need to convert multiple saved inference models to Serving models**. The following takes the ultra-lightweight image recognition model in PP-ShiTu as an example to introduce the deployment of the image recognition service.
+
+### 3.1 Model conversion
+
+- Go to the working directory:
+  ```shell
+  cd deploy/
+  ```
+- Download the generic detection inference model and the generic recognition inference model
+  ```shell
+  # Create and enter the models folder
+  mkdir models
+  cd models
+  # Download and unzip the generic recognition model
+  wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
+  tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar
+  # Download and unzip the generic detection model
+  wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
+  tar -xf picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
+  ```
+- Convert the generic recognition inference model to a Serving model:
+  ```shell
+  # Convert the generic recognition model
+  python3.7 -m paddle_serving_client.convert \
+  --dirname ./general_PPLCNet_x2_5_lite_v1.0_infer/ \
+  --model_filename inference.pdmodel \
+  --params_filename inference.pdiparams \
+  --serving_server ./general_PPLCNet_x2_5_lite_v1.0_serving/ \
+  --serving_client ./general_PPLCNet_x2_5_lite_v1.0_client/
+  ```
+  The meaning of the parameters in the above command is the same as in [3.1 Model conversion](#31-model-conversion).
+
+  After the recognition inference model is converted, there will be additional folders `general_PPLCNet_x2_5_lite_v1.0_serving/` and `general_PPLCNet_x2_5_lite_v1.0_client/` in the current folder. In the `serving_server_conf.prototxt` files under both the `general_PPLCNet_x2_5_lite_v1.0_serving/` and `general_PPLCNet_x2_5_lite_v1.0_client/` directories, change the `alias_name` in `fetch_var` to `features`.
The content of the modified `serving_server_conf.prototxt` is as follows + + ```log + feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 + } + fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: false + fetch_type: 1 + shape: 512 + } + ``` + + After the conversion of the general recognition inference model is completed, there will be additional `general_PPLCNet_x2_5_lite_v1.0_serving/` and `general_PPLCNet_x2_5_lite_v1.0_client/` folders in the current folder, with the following structure: + ```shell + ├── general_PPLCNet_x2_5_lite_v1.0_serving/ + │ ├── inference.pdiparams + │ ├── inference.pdmodel + │ ├── serving_server_conf.prototxt + │ └── serving_server_conf.stream.prototxt + │ + └── general_PPLCNet_x2_5_lite_v1.0_client/ + ├── serving_client_conf.prototxt + └── serving_client_conf.stream.prototxt + ``` +- Convert general detection inference model to Serving model: + ```shell + # Convert generic detection model + python3.7 -m paddle_serving_client.convert --dirname ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ \ + --serving_client ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ + ``` + The meaning of the parameters of the above command is the same as [#3.1 Model conversion](#3.1) + + After the conversion of the general detection inference model is completed, there will be additional folders `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` and `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/` in the current folder, with the following structure: + ```shell + ├── picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ + │ ├── inference.pdiparams + │ ├── inference.pdmodel + │ ├── serving_server_conf.prototxt + │ └── serving_server_conf.stream.prototxt + │ + └── picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ + ├── serving_client_conf.prototxt + └── serving_client_conf.stream.prototxt + ``` + The specific meaning of the parameters in the above command is shown in the following table + | parameter | type | default value | description | + | ----------------- | ---- | ------------------ | ----------------------------------------------------- | + | `dirname` | str | - | The storage path of the model file to be converted. The program structure file and parameter file are saved in this directory.| + | `model_filename` | str | None | The name of the file storing the model Inference Program structure that needs to be converted. If set to None, use `__model__` as the default filename | + | `params_filename` | str | None | The name of the file that stores all parameters of the model that need to be transformed. It needs to be specified if and only if all model parameters are stored in a single binary file. If the model parameters are stored in separate files, set it to None | + | `serving_server` | str | `"serving_server"` | The storage path of the converted model files and configuration files. Default is serving_server | + | `serving_client` | str | `"serving_client"` | The converted client configuration file storage path. 
Default is serving_client |
+
+- Download and unzip the index of the retrieval library that has been built
+  ```shell
+  # Go back to the deploy directory
+  cd ../
+  # Download the built retrieval library index
+  wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar
+  # Decompress the built retrieval library index
+  tar -xf drink_dataset_v1.0.tar
+  ```

+### 3.2 Service deployment and request
+
+**Note:** The recognition service involves multiple models, and the Pipeline deployment method is used for performance reasons. The Pipeline deployment method currently does not support the Windows platform.
+- Go to the working directory
+  ```shell
+  cd ./deploy/paddleserving/recognition
+  ```
+  The paddleserving directory contains the code to start the Python Pipeline service and the C++ Serving service, and to send prediction requests, including:
+  ```shell
+  __init__.py
+  config.yml                    # The configuration file to start the python pipeline service
+  pipeline_http_client.py       # Script for sending pipeline prediction requests in http mode
+  pipeline_rpc_client.py        # Script for sending pipeline prediction requests in rpc mode
+  recognition_web_service.py    # Script to start the pipeline server
+  readme.md                     # Recognition model service deployment document
+  run_cpp_serving.sh            # Script to start the C++ Serving deployment
+  test_cpp_serving_client.py    # Script for sending C++ Serving prediction requests in rpc mode
+  ```
+
+#### 3.2.1 Python Serving
+
+- Start the service:
+  ```shell
+  # Start the service and save the running log in log.txt
+  python3.7 recognition_web_service.py &>log.txt &
+  ```
+
+- Send a request:
+  ```shell
+  python3.7 pipeline_http_client.py
+  ```
+  After a successful run, the results of the model prediction will be printed in the cmd window, as follows:
+  ```log
+  {'err_no': 0, 'err_msg': '', 'key': ['result'], 'value': ["[{'bbox': [345, 95, 524, 576], 'rec_docs': 'Red Bull-Enhanced', 'rec_scores': 0.79903316}]"], 'tensors': []}
+  ```
+
+#### 3.2.2 C++ Serving
+
+Unlike Python Serving, the C++ Serving client calls C++ OPs to predict, so before starting the service you need to compile and install the Serving server package and set `SERVING_BIN`.
+- Compile and install the Serving server package
+  ```shell
+  # Enter the working directory
+  cd PaddleClas/deploy/paddleserving
+  # One-click compile and install Serving server, set SERVING_BIN
+  source ./build_server.sh python3.7
+  ```
+  **Note:** The paths set in [build_server.sh](../build_server.sh#L55-L62) may need to be modified according to the actual machine environment, such as the CUDA and python versions, before compiling.
+
+- The input and output format used by C++ Serving is different from that of Python Serving, so you need to execute the following commands, which overwrite the 4 prototxt files generated in [3.1 Model conversion](#31-model-conversion) by copying the 4 prepared files into the corresponding folders:
+ ```shell + # Enter PaddleClas/deploy directory + cd PaddleClas/deploy/ + + # Overwrite prototxt file + \cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ./models/general_PPLCNet_x2_5_lite_v1.0_serving/ + \cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ./models/general_PPLCNet_x2_5_lite_v1.0_client/ + \cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ + \cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ + ``` + +- Start the service: + ```shell + # Enter the working directory + cd PaddleClas/deploy/paddleserving/recognition + + # The default port number is 9400; the running log is saved in log_PPShiTu.txt by default + # CPU deployment + sh run_cpp_serving.sh + # GPU deployment, and specify card 0 + sh run_cpp_serving.sh 0 + ``` + +- send request: + ```shell + # send service request + python3.7 test_cpp_serving_client.py + ``` + After a successful run, the results of the model predictions are printed in the client's terminal window as follows: + ```log + WARNING: Logging before InitGoogleLogging() is written to STDERR + I0614 03:01:36.273097 6084 naming_service_thread.cpp:202] brpc::policy::ListNamingService("127.0.0.1:9400"): added 1 + I0614 03:01:37.393564 6084 general_model.cpp:490] [client]logid=0,client_cost=1107.82ms,server_cost=1101.75ms. + [{'bbox': [345, 95, 524, 585], 'rec_docs': 'Red Bull-Enhanced', 'rec_scores': 0.8073724}] + ``` + +- close the service: + If the service program is running in the foreground, you can press `Ctrl+C` to terminate the server program; if it is running in the background, you can use the kill command to close related processes, or you can execute the following command in the path where the service program is started to terminate the server program: + ```bash + python3.7 -m paddle_serving_server.serve stop + ``` + After the execution is completed, the `Process stopped` message appears, indicating that the service was successfully shut down. + + +## 4. FAQ + +**Q1**: No result is returned after the request is sent or an output decoding error is prompted + +**A1**: Do not set the proxy when starting the service and sending the request. You can close the proxy before starting the service and sending the request. The command to close the proxy is: +```shell +unset https_proxy +unset http_proxy +``` +**Q2**: nothing happens after starting the service + +**A2**: You can check whether the path corresponding to `model_config` in `config.yml` exists, and whether the folder name is correct + +For more service deployment types, such as `RPC prediction service`, you can refer to Serving's [github official website](https://github.com/PaddlePaddle/Serving/tree/v0.9.0/examples) diff --git a/docs/en/inference_deployment/whl_deploy_en.md b/docs/en/inference_deployment/whl_deploy_en.md index 224d41a7c1f2de9886fd830a36b8910dae0f97b6..e2666458a27f55bdb44f5fcb2646ba9107e80163 100644 --- a/docs/en/inference_deployment/whl_deploy_en.md +++ b/docs/en/inference_deployment/whl_deploy_en.md @@ -1,6 +1,6 @@ # PaddleClas wheel package -Paddleclas supports Python WHL package for prediction. At present, WHL package only supports image classification, but does not support subject detection, feature extraction and vector retrieval. +PaddleClas supports Python wheel package for prediction. 
At present, the PaddleClas wheel supports image classification including ImageNet1k models and PULC models, but does not support mainbody detection, feature extraction and vector retrieval.

---

@@ -8,8 +8,10 @@ Paddleclas supports Python WHL package for prediction. At present, WHL package o

- [1. Installation](#1)
- [2. Quick Start](#2)
+  - [2.1 ImageNet1k models](#2.1)
+  - [2.2 PULC models](#2.2)
- [3. Definition of Parameters](#3)
-- [4. Usage](#4)
+- [4. More usage](#4)
- [4.1 View help information](#4.1)
- [4.2 Prediction using inference model provided by PaddleClas](#4.2)
- [4.3 Prediction using local model files](#4.3)
@@ -20,6 +22,7 @@ Paddleclas supports Python WHL package for prediction. At present, WHL package o
- [4.8 Specify the mapping between class id and label name](#4.8)

+
## 1. Installation

* installing from pypi

@@ -36,8 +39,14 @@ pip3 install dist/*
```

+
## 2. Quick Start
-* Using the `ResNet50` model provided by PaddleClas, the following image(`'docs/images/inference_deployment/whl_demo.jpg'`) as an example.
+
+
+
+### 2.1 ImageNet1k models
+
+Using the `ResNet50` model provided by PaddleClas, take the following image (`'docs/images/inference_deployment/whl_demo.jpg'`) as an example.

![](../../images/inference_deployment/whl_demo.jpg)

@@ -68,25 +77,88 @@ filename: docs/images/inference_deployment/whl_demo.jpg, top-5, class_ids: [8, 7
Predict complete!
```

+
+
+### 2.2 PULC models
+
+PULC integrates various state-of-the-art algorithms for backbone networks, data augmentation, knowledge distillation, etc., and can automatically produce a lightweight and high-accuracy image classification model.
+
+PaddleClas provides a series of test cases, which contain demos of different scenarios covering people, cars, OCR, etc. Click [here](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip) to download the data.
+
+Prediction using the PULC "Human Exists Classification" model provided by PaddleClas:
+
+* Python
+
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="person_exists")
+result = model.predict(input_data="pulc_demo_imgs/person_exists/objects365_01780782.jpg")
+print(next(result))
+```
+
+```
+>>> result
+[{'class_ids': [0], 'scores': [0.9955421453341842], 'label_names': ['nobody'], 'filename': 'pulc_demo_imgs/person_exists/objects365_01780782.jpg'}]
+```
+
+`Nobody` means there is no one in the image, `someone` means there is someone in the image. Therefore, the prediction result indicates that there is no one in the image.
+
+**Note**: `model.predict()` is a generator, so it must be called with `next()` or a `for` loop. Prediction is then performed batch by batch, with batches of length `batch_size` (default 1). You can specify the arguments `batch_size` and `model_name` when instantiating the PaddleClas object, for example: `model = paddleclas.PaddleClas(model_name="person_exists", batch_size=2)`. Please refer to [Supported Model List](#PULC_Models) for the supported model list.
+
+* CLI
+
+```bash
+paddleclas --model_name=person_exists --infer_imgs=pulc_demo_imgs/person_exists/objects365_01780782.jpg
+```
+
+```
+>>> result
+class_ids: [0], scores: [0.9955421453341842], label_names: ['nobody'], filename: pulc_demo_imgs/person_exists/objects365_01780782.jpg
+Predict complete!
+```
+
+**Note**: The `--infer_imgs` argument specifies the image(s) to be predicted; you can also specify a directory containing images. To use another model, specify the `--model_name` argument. Please refer to [Supported Model List](#PULC_Models) for the supported model list.
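+
+Since `model.predict()` returns a generator that yields one batch (a list of result dicts) at a time, directory-level prediction can be written as a simple loop. A minimal sketch, assuming the demo data downloaded above (any folder of images works):
+
+```python
+import paddleclas
+
+# Each item yielded by the generator is a list of up to batch_size result dicts.
+model = paddleclas.PaddleClas(model_name="person_exists", batch_size=2)
+results = model.predict(input_data="pulc_demo_imgs/person_exists/")  # a directory of images
+for batch in results:
+    for res in batch:
+        print(res["filename"], res["label_names"], res["scores"])
+```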
+
+
+**Supported Model List**
+
+The names of the PULC series models are as follows:
+
+| Name | Intro |
+| --- | --- |
+| person_exists | Human Exists Classification |
+| person_attribute | Pedestrian Attribute Classification |
+| safety_helmet | Classification of Whether Wearing Safety Helmet |
+| traffic_sign | Traffic Sign Classification |
+| vehicle_attribute | Vehicle Attribute Classification |
+| car_exists | Car Exists Classification |
+| text_image_orientation | Text Image Orientation Classification |
+| textline_orientation | Text-line Orientation Classification |
+| language_classification | Language Classification |
+
+Please refer to [Human Exists Classification](../PULC/PULC_person_exists_en.md), [Pedestrian Attribute Classification](../PULC/PULC_person_attribute_en.md), [Classification of Whether Wearing Safety Helmet](../PULC/PULC_safety_helmet_en.md), [Traffic Sign Classification](../PULC/PULC_traffic_sign_en.md), [Vehicle Attribute Classification](../PULC/PULC_vehicle_attribute_en.md), [Car Exists Classification](../PULC/PULC_car_exists_en.md), [Text Image Orientation Classification](../PULC/PULC_text_image_orientation_en.md), [Text-line Orientation Classification](../PULC/PULC_textline_orientation_en.md), [Language Classification](../PULC/PULC_language_classification_en.md) for more information about the different scenarios.
+
+
## 3. Definition of Parameters

The following parameters can be specified in Command Line or used as parameters of the constructor when instantiating the PaddleClas object in Python.
* model_name(str): If using inference model based on ImageNet1k provided by Paddle, please specify the model's name by the parameter.
* inference_model_dir(str): Local model files directory, which is valid when `model_name` is not specified. The directory should contain `inference.pdmodel` and `inference.pdiparams`.
* infer_imgs(str): The path of image to be predicted, or the directory containing the image files, or the URL of the image from Internet.
-* use_gpu(bool): Whether to use GPU or not, default by `True`.
-* gpu_mem(int): GPU memory usages,default by `8000`。
-* use_tensorrt(bool): Whether to open TensorRT or not. Using it can greatly promote predict preformance, default by `False`.
-* enable_mkldnn(bool): Whether enable MKLDNN or not, default `False`.
-* cpu_num_threads(int): Assign number of cpu threads, valid when `--use_gpu` is `False` and `--enable_mkldnn` is `True`, default by `10`.
-* batch_size(int): Batch size, default by `1`.
-* resize_short(int): Resize the minima between height and width into `resize_short`, default by `256`.
-* crop_size(int): Center crop image to `crop_size`, default by `224`.
-* topk(int): Print (return) the `topk` prediction results, default by `5`.
-* class_id_map_file(str): The mapping file between class ID and label, default by `ImageNet1K` dataset's mapping.
-* pre_label_image(bool): whether prelabel or not, default=False.
-* save_dir(str): The directory to save the prediction results that can be used as pre-label, default by `None`, that is, not to save.
+* use_gpu(bool): Whether to use GPU or not.
+* gpu_mem(int): GPU memory usage.
+* use_tensorrt(bool): Whether to enable TensorRT or not. Using it can greatly improve prediction performance.
+* enable_mkldnn(bool): Whether to enable MKL-DNN or not.
+* cpu_num_threads(int): Number of CPU threads, valid when `--use_gpu` is `False` and `--enable_mkldnn` is `True`.
+* batch_size(int): Batch size.
+* resize_short(int): Resize the shorter side of the image to `resize_short`.
+* crop_size(int): Center crop the image to `crop_size`.
+* topk(int): Print (return) the `topk` prediction results when the Topk postprocess is used.
+* threshold(float): The threshold of ThreshOutput when that postprocess is used.
+* class_id_map_file(str): The mapping file between class ID and label.
+* save_dir(str): The directory to save the prediction results that can be used as pre-label.
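+
+These parameters can be combined freely when constructing the `PaddleClas` object. For example, a CPU-only setup with MKL-DNN acceleration might look like the following sketch (the thread count is an assumption to tune per machine):
+
+```python
+from paddleclas import PaddleClas
+
+# CPU inference with MKL-DNN enabled; cpu_num_threads only takes effect
+# when use_gpu=False and enable_mkldnn=True.
+clas = PaddleClas(model_name='ResNet50',
+                  use_gpu=False,
+                  enable_mkldnn=True,
+                  cpu_num_threads=10,
+                  batch_size=1,
+                  topk=5)
+result = clas.predict('docs/images/inference_deployment/whl_demo.jpg')
+print(next(result))
+```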
+* crop_size(int): Center crop image to `crop_size`. +* topk(int): Print (return) the `topk` prediction results when Topk postprocess is used. +* threshold(float): The threshold of ThreshOutput when postprocess is used. +* class_id_map_file(str): The mapping file between class ID and label. +* save_dir(str): The directory to save the prediction results that can be used as pre-label. **Note**: If you want to use `Transformer series models`, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input size of model, and need to set `resize_short=384`, `resize=384`. The following is a demo. @@ -103,6 +175,7 @@ clas = PaddleClas(model_name='ViT_base_patch16_384', resize_short=384, crop_size ``` + ## 4. Usage PaddleClas provides two ways to use: @@ -110,6 +183,7 @@ PaddleClas provides two ways to use: 2. Bash command line programming. + ### 4.1 View help information * CLI @@ -118,6 +192,7 @@ paddleclas -h ``` + ### 4.2 Prediction using inference model provide by PaddleClas You can use the inference model provided by PaddleClas to predict, and only need to specify `model_name`. In this case, PaddleClas will automatically download files of specified model and save them in the directory `~/.paddleclas/`. @@ -136,6 +211,7 @@ paddleclas --model_name='ResNet50' --infer_imgs='docs/images/inference_deploymen ``` + ### 4.3 Prediction using local model files You can use the local model files trained by yourself to predict, and only need to specify `inference_model_dir`. Note that the directory must contain `inference.pdmodel` and `inference.pdiparams`. @@ -154,6 +230,7 @@ paddleclas --inference_model_dir='./inference/' --infer_imgs='docs/images/infere ``` + ### 4.4 Prediction by batch You can predict by batch, only need to specify `batch_size` when `infer_imgs` is direcotry contain image files. @@ -173,6 +250,7 @@ paddleclas --model_name='ResNet50' --infer_imgs='docs/images/' --batch_size 2 ``` + ### 4.5 Prediction of Internet image You can predict the Internet image, only need to specify URL of Internet image by `infer_imgs`. In this case, the image file will be downloaded and saved in the directory `~/.paddleclas/images/`. @@ -191,6 +269,7 @@ paddleclas --model_name='ResNet50' --infer_imgs='https://raw.githubusercontent.c ``` + ### 4.6 Prediction of NumPy.array format image In Python code, you can predict the `NumPy.array` format image, only need to use the `infer_imgs` to transfer variable of image data. Note that the models in PaddleClas only support to predict 3 channels image data, and channels order is `RGB`. @@ -205,6 +284,7 @@ print(next(result)) ``` + ### 4.7 Save the prediction result(s) You can save the prediction result(s) as pre-label, only need to use `pre_label_out_dir` to specify the directory to save. @@ -212,17 +292,18 @@ You can save the prediction result(s) as pre-label, only need to use `pre_label_ ```python from paddleclas import PaddleClas clas = PaddleClas(model_name='ResNet50', save_dir='./output_pre_label/') -infer_imgs = 'docs/images/inference_deployment/whl_' # it can be infer_imgs folder path which contains all of images you want to predict. +infer_imgs = 'docs/images/' # it can be infer_imgs folder path which contains all of images you want to predict. 
result=clas.predict(infer_imgs) print(next(result)) ``` * CLI ```bash -paddleclas --model_name='ResNet50' --infer_imgs='docs/images/inference_deployment/whl_' --save_dir='./output_pre_label/' +paddleclas --model_name='ResNet50' --infer_imgs='docs/images/' --save_dir='./output_pre_label/' ``` + ### 4.8 Specify the mapping between class id and label name You can specify the mapping between class id and label name, only need to use `class_id_map_file` to specify the mapping file. PaddleClas uses ImageNet1K's mapping by default. diff --git a/docs/en/models/MobileViT_en.md b/docs/en/models/MobileViT_en.md index 96b5e8260e51cf062b7da74b449140eb1bc68dd0..2aebdebfe9821e46688662fe3e6b4a460bcec1db 100644 --- a/docs/en/models/MobileViT_en.md +++ b/docs/en/models/MobileViT_en.md @@ -18,6 +18,6 @@ MobileViT is a lightweight visual Transformer network that can be used as a gene | Models | Top1 | Top5 | Reference
top1 | Reference
top5 | FLOPs
(M) | Params
(M) | |:--:|:--:|:--:|:--:|:--:|:--:|:--:| -| MobileViT_XXS | 0.6867 | 0.8878 | 0.690 | - | 1849.35 | 5.59 | +| MobileViT_XXS | 0.6867 | 0.8878 | 0.690 | - | 337.24 | 1.28 | | MobileViT_XS | 0.7454 | 0.9227 | 0.747 | - | 930.75 | 2.33 | -| MobileViT_S | 0.7814 | 0.9413 | 0.783 | - | 337.24 | 1.28 | +| MobileViT_S | 0.7814 | 0.9413 | 0.783 | - | 1849.35 | 5.59 | diff --git a/docs/images/PP-HGNet/PP-HGNet-block.png b/docs/images/PP-HGNet/PP-HGNet-block.png new file mode 100644 index 0000000000000000000000000000000000000000..615aa383bb70d9d6a8d3d3e2860bfce3595515a6 Binary files /dev/null and b/docs/images/PP-HGNet/PP-HGNet-block.png differ diff --git a/docs/images/PP-HGNet/PP-HGNet.png b/docs/images/PP-HGNet/PP-HGNet.png new file mode 100644 index 0000000000000000000000000000000000000000..cb5b18fe4e9decc14c68e9cee9aeeed172d3a844 Binary files /dev/null and b/docs/images/PP-HGNet/PP-HGNet.png differ diff --git a/docs/images/PP-LCNetV2/net.png b/docs/images/PP-LCNetV2/net.png new file mode 100644 index 0000000000000000000000000000000000000000..079f5ab43f2d0da67c49f1bf33d2648ab8d3f176 Binary files /dev/null and b/docs/images/PP-LCNetV2/net.png differ diff --git a/docs/images/PP-LCNetV2/rep.png b/docs/images/PP-LCNetV2/rep.png new file mode 100644 index 0000000000000000000000000000000000000000..0e94220fd7cb5b1732754d7102db830af62aaf30 Binary files /dev/null and b/docs/images/PP-LCNetV2/rep.png differ diff --git a/docs/images/PP-LCNetV2/shortcut.png b/docs/images/PP-LCNetV2/shortcut.png new file mode 100644 index 0000000000000000000000000000000000000000..d8024d48b20b9cac0c7cbddf12df799180ff82d6 Binary files /dev/null and b/docs/images/PP-LCNetV2/shortcut.png differ diff --git a/docs/images/PP-LCNetV2/split_pw.png b/docs/images/PP-LCNetV2/split_pw.png new file mode 100644 index 0000000000000000000000000000000000000000..f48800a173309e0ef9d998cc06764615db5bd4db Binary files /dev/null and b/docs/images/PP-LCNetV2/split_pw.png differ diff --git a/docs/images/PULC/docs/car_exists_data_demo.jpeg b/docs/images/PULC/docs/car_exists_data_demo.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..9959954b6b8bf27589e1d2081f86c6078d16e2c1 Binary files /dev/null and b/docs/images/PULC/docs/car_exists_data_demo.jpeg differ diff --git a/docs/images/PULC/docs/language_classification_original_data.png b/docs/images/PULC/docs/language_classification_original_data.png new file mode 100644 index 0000000000000000000000000000000000000000..42c4a03ebe3df6b4563e6f006d61faa0a4b1fdea Binary files /dev/null and b/docs/images/PULC/docs/language_classification_original_data.png differ diff --git a/docs/images/PULC/docs/person_attribute_data_demo.png b/docs/images/PULC/docs/person_attribute_data_demo.png new file mode 100644 index 0000000000000000000000000000000000000000..c9b276af0a554bbe07d807224d56fbbe5e2b7400 Binary files /dev/null and b/docs/images/PULC/docs/person_attribute_data_demo.png differ diff --git a/docs/images/PULC/docs/person_exists_data_demo.png b/docs/images/PULC/docs/person_exists_data_demo.png new file mode 100644 index 0000000000000000000000000000000000000000..b74ab64b6f62b83880aa426c1d05cb1fc53840e4 Binary files /dev/null and b/docs/images/PULC/docs/person_exists_data_demo.png differ diff --git a/docs/images/PULC/docs/safety_helmet_data_demo.jpg b/docs/images/PULC/docs/safety_helmet_data_demo.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70bd2d952fd20e6f8fe39182914e400177d913c4 Binary files /dev/null and b/docs/images/PULC/docs/safety_helmet_data_demo.jpg differ 
diff --git a/docs/images/PULC/docs/text_image_orientation_data_demo.png b/docs/images/PULC/docs/text_image_orientation_data_demo.png new file mode 100644 index 0000000000000000000000000000000000000000..756b18e03077f7c631deb39390aa84ba0f4580ae Binary files /dev/null and b/docs/images/PULC/docs/text_image_orientation_data_demo.png differ diff --git a/docs/images/PULC/docs/text_image_orientation_original_data.png b/docs/images/PULC/docs/text_image_orientation_original_data.png new file mode 100644 index 0000000000000000000000000000000000000000..9014179214224c21f50a595f414617ab12538b8e Binary files /dev/null and b/docs/images/PULC/docs/text_image_orientation_original_data.png differ diff --git a/docs/images/PULC/docs/textline_orientation_data_demo.png b/docs/images/PULC/docs/textline_orientation_data_demo.png new file mode 100644 index 0000000000000000000000000000000000000000..fcb48732026e48e14a616967ee06904c2feb9449 Binary files /dev/null and b/docs/images/PULC/docs/textline_orientation_data_demo.png differ diff --git a/docs/images/PULC/docs/traffic_sign_data_demo.png b/docs/images/PULC/docs/traffic_sign_data_demo.png new file mode 100644 index 0000000000000000000000000000000000000000..6fac97a299b6fbf037a931f7ba56607f791271f3 Binary files /dev/null and b/docs/images/PULC/docs/traffic_sign_data_demo.png differ diff --git a/docs/images/PULC/docs/vehicle_attribute_data_demo.png b/docs/images/PULC/docs/vehicle_attribute_data_demo.png new file mode 100644 index 0000000000000000000000000000000000000000..68c67acb331de19b688b9b9111fb8c20ff42fc2a Binary files /dev/null and b/docs/images/PULC/docs/vehicle_attribute_data_demo.png differ diff --git a/docs/images/action_rec_by_classification.gif b/docs/images/action_rec_by_classification.gif new file mode 100644 index 0000000000000000000000000000000000000000..52046b249b145d2099c7360d3c56abc3b51764bd Binary files /dev/null and b/docs/images/action_rec_by_classification.gif differ diff --git a/docs/images/algorithm_introduction/hnsw.png b/docs/images/algorithm_introduction/hnsw.png new file mode 100644 index 0000000000000000000000000000000000000000..eeacd32bd31e690bca2363932ca7ab9d78750313 Binary files /dev/null and b/docs/images/algorithm_introduction/hnsw.png differ diff --git a/docs/images/class_simple.gif b/docs/images/class_simple.gif new file mode 100644 index 0000000000000000000000000000000000000000..c30122dfa239e14901738f0c6583be6a259d339f Binary files /dev/null and b/docs/images/class_simple.gif differ diff --git a/docs/images/class_simple_en.gif b/docs/images/class_simple_en.gif new file mode 100644 index 0000000000000000000000000000000000000000..14c3a678f6b0ba81b7761c397ddc97826817409a Binary files /dev/null and b/docs/images/class_simple_en.gif differ diff --git a/docs/images/classification.gif b/docs/images/classification.gif new file mode 100644 index 0000000000000000000000000000000000000000..db2ff2a56be31793402a350f68e59eb924d7c1bf Binary files /dev/null and b/docs/images/classification.gif differ diff --git a/docs/images/classification_en.gif b/docs/images/classification_en.gif new file mode 100644 index 0000000000000000000000000000000000000000..884d5ba1453a3c717a9060e3a9831ea6e5160e7d Binary files /dev/null and b/docs/images/classification_en.gif differ diff --git a/docs/images/reid/reid_overview.jpg b/docs/images/reid/reid_overview.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2562a529b537113b7b7a1ab65a4528a23b8bd8d3 Binary files /dev/null and b/docs/images/reid/reid_overview.jpg differ diff --git 
a/docs/images/reid/strong-baseline.jpg b/docs/images/reid/strong-baseline.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26b0b1aa34b5157efbd2e6bb30371c7581ac897a Binary files /dev/null and b/docs/images/reid/strong-baseline.jpg differ diff --git a/docs/zh_CN/PULC/PULC_car_exists.md b/docs/zh_CN/PULC/PULC_car_exists.md new file mode 100644 index 0000000000000000000000000000000000000000..4107363534f9c76508d660ffb7d69dc705076a1a --- /dev/null +++ b/docs/zh_CN/PULC/PULC_car_exists.md @@ -0,0 +1,470 @@ +# PULC 有车/无车分类模型 + +------ + + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 SKL-UGI 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图像](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + + +## 1. 模型和应用场景介绍 + +该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的有车/无车的分类模型。该模型可以广泛应用于如监控场景、海量数据过滤场景等。 + +下表列出了判断图片中是否有车的二分类模型的相关指标,前两行展现了使用 SwinTranformer_tiny 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第三行至第六行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + SKL-UGI 知识蒸馏策略训练得到的模型的相关指标。 + + +| 模型 | Tpr(%)@Fpr0.01 | 延时(ms) | 存储(M) | 策略 | +|-------|----------------|----------|---------------|---------------| +| SwinTranformer_tiny | 97.71 | 95.30 | 111 | 使用 ImageNet 预训练模型 | +| MobileNetV3_small_x0_35 | 81.23 | 2.85 | 2.7 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 94.72 | 2.12 | 7.1 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 95.48 | 2.12 | 7.1 | 使用 SSLD 预训练模型 | +| PPLCNet_x1_0 | 95.48 | 2.12 | 7.1 | 使用 SSLD 预训练模型+EDA 策略| +| PPLCNet_x1_0 | 95.92 | 2.12 | 7.1 | 使用 SSLD 预训练模型+EDA 策略+SKL-UGI 知识蒸馏策略| + +从表中可以看出,backbone 为 SwinTranformer_tiny 时精度较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度可以大幅提升,但是会导致精度大幅下降。将 backbone 替换为速度更快的 PPLCNet_x1_0 时,精度较 MobileNetV3_small_x0_35 高 13 个百分点,与此同时速度依旧可以快 20% 以上。在此基础上,使用 SSLD 预训练模型后,在不改变推理速度的前提下,精度可以提升约 0.7 个百分点,进一步地,在使用 SKL-UGI 知识蒸馏后,精度可以继续提升 0.44 个百分点。此时,PPLCNet_x1_0 达到了接近 SwinTranformer_tiny 模型的精度,但是速度快 40 多倍。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。 + +**备注:** + +* `Tpr`指标的介绍可以参考 [3.3节](#3.3)的备注部分,延时是基于 Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 测试得到,开启 MKLDNN 加速策略,线程数为10。 +* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。 + + + + +## 2. 
模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。 + +* 使用命令行快速预测 + +```bash +paddleclas --model_name=car_exists --infer_imgs=pulc_demo_imgs/car_exists/objects365_00001507.jpeg +``` + +结果如下: +``` +>>> result +class_ids: [1], scores: [0.9871138], label_names: ['contains_car'], filename: pulc_demo_imgs/car_exists/objects365_00001507.jpeg +Predict complete! +``` + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。 + + +* 在 Python 代码中预测 +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="car_exists") +result = model.predict(input_data="pulc_demo_imgs/car_exists/objects365_00001507.jpeg") +print(next(result)) +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="car_exists", batch_size=2)`, 使用默认的代码返回结果示例如下: + +``` +>>> result +[{'class_ids': [1], 'scores': [0.9871138], 'label_names': ['contains_car'], 'filename': 'pulc_demo_imgs/car_exists/objects365_00001507.jpeg'}] +``` + + + + +## 3. 模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档[环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +本案例中所使用的所有数据集均为开源数据,`train`和`val` 集合均为[Objects365 数据](https://www.objects365.org/overview.html)的子集,`ImageNet_val` 为[ImageNet-1k 数据](https://www.image-net.org/)的验证集。 + + + +#### 3.2.2 数据集获取 + +在公开数据集的基础上经过后处理即可得到本案例需要的数据,具体处理方法如下: + +- 训练集合,本案例处理了 Objects365 数据训练集的标注文件,如果某张图含有“car”的标签,且这个框的面积在整张图中的比例大于 10%,即认为该张图中含有车,如果某张图中没有任何与交通工具,例如car、bus等相关的的标签,则认为该张图中不含有车。经过处理后,得到 108629 条可用数据,其中有车的数据有 27422 条,无车的数据 81207 条。 + +- 验证集合,处理方法与训练集相同,数据来源于 Objects365 数据集的验证集。为了测试结果准确,验证集经过人工校正,去除了一些可能存在标注错误的图像。 + +* 注:由于objects365的标签并不是完全互斥的,例如F1赛车可能是 "F1 Formula",也可能被标称"car"。为了减轻干扰,我们仅保留"car"标签作为有车,而将不含任何交通工具的图作为无车。 + +处理后的数据集部分数据可视化如下: + +![](../../images/PULC/docs/car_exists_data_demo.jpeg) + +此处提供了经过上述方法处理好的数据,可以直接下载得到。 + + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压有车/无车场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/car_exists.tar +tar -xf car_exists.tar +cd ../ +``` + +执行上述命令后,`dataset/` 下存在 `car_exists` 目录,该目录中具有以下数据: + +``` + +├── objects365_car +│   ├── objects365_00000039.jpg +│   ├── objects365_00000099.jpg +├── ImageNet_val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +... 
+├── train_list.txt +├── train_list.txt.debug +├── train_list_for_distill.txt +├── val_list.txt +└── val_list.txt.debug +``` + +其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件,`train_list.txt.debug` 和 `val_list.txt.debug` 分别为训练集和验证集的 `debug` 标签文件,其分别是 `train_list.txt` 和 `val_list.txt` 的子集,用该文件可以快速体验本案例的流程。`ImageNet_val/` 是 ImageNet-1k 的验证集,该集合和 `train` 集合的混合数据用于本案例的 `SKL-UGI知识蒸馏策略`,对应的训练标签文件为 `train_list_for_distill.txt` 。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考 [PaddleClas 分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + +* 关于如何得到蒸馏的标签文件可以参考[知识蒸馏标签获得方法](../advanced_tutorials/ssld.md#3.2)。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml` 中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml +``` + +验证集的最佳指标在 `0.95-0.96` 之间(数据集较小,容易造成波动)。 + +**备注:** + +* 此时使用的指标为Tpr,该指标描述了在假正类率(Fpr)小于某一个指标时的真正类率(Tpr),是产业中二分类问题常用的指标之一。在本案例中,Fpr 为 1/100 。关于 Fpr 和 Tpr 的更多介绍,可以参考[这里](https://baike.baidu.com/item/AUC/19282953)。 + +* 在eval时,会打印出来当前最佳的 TprAtFpr 指标,具体地,其会打印当前的 `Fpr`、`Tpr` 值,以及当前的 `threshold`值,`Tpr` 值反映了在当前 `Fpr` 值下的召回率,该值越高,代表模型越好。`threshold` 表示当前最佳 `Fpr` 所对应的分类阈值,可用于后续模型部署落地等。 + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [1], 'scores': [0.9871138], 'label_names': ['contains_car'], 'filename': 'deploy/images/PULC/car_exists/objects365_00001507.jpeg'}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `deploy/images/PULC/car_exists/objects365_00001507.jpeg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + +* 二分类默认的阈值为0.5, 如果需要指定阈值,可以重写 `Infer.PostProcess.threshold` ,如`-o Infer.PostProcess.threshold=0.9794`,该值需要根据实际场景来确定,此处的 `0.9794` 是在该场景中的 `val` 数据集在百分之一 Fpr 下得到的最佳 Tpr 所得到的。 + + + + +## 4. 
模型压缩 + + + +### 4.1 SKL-UGI 知识蒸馏 + +SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md)。 + + + +#### 4.1.1 教师模型训练 + +复用 `ppcls/configs/PULC/car_exists/PPLCNet/PPLCNet_x1_0.yaml` 中的超参数,训练教师模型,训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd +``` + +验证集的最佳指标为 `0.96-0.98` 之间,当前教师模型最好的权重保存在 `output/ResNet101_vd/best_model.pdparams`。 + + + +#### 4.1.2 蒸馏训练 + +配置文件`ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型,使用ImageNet数据集的验证集作为新增的无标签数据。训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model +``` + +验证集的最佳指标为 `0.95-0.97` 之间,当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。 + + + + +## 5. 超参搜索 + +在 [3.3 节](#3.3)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。 + +**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。 + + + +## 6. 模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用 MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于 Paddle Inference 推理引擎的介绍,可以参考 [Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_car_exists_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_car_exists_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_car_exists_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在`output/PPLCNet_x1_0/best_model.pdparams`中。 + + + +### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/car_exists_infer.tar && tar -xf car_exists_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── car_exists_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/car_exists/objects365_00001507.jpeg` 进行有人/无人分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/car_exists/inference_car_exists.yaml +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/car_exists/inference_car_exists.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +objects365_00001507.jpeg: class id(s): [1], score(s): [0.99], label_name(s): ['contains_car'] +``` + + +**备注:** 二分类默认的阈值为0.5, 如果需要指定阈值,可以重写 `Infer.PostProcess.threshold` ,如`-o 
Infer.PostProcess.threshold=0.9794`,该值需要根据实际场景来确定,此处的 `0.9794` 是在该场景中的 `val` 数据集在百分之一 Fpr 下得到的最佳 Tpr 所得到的。该阈值的确定方法可以参考[3.3节](#3.3)备注部分。 + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/car_exists/inference_car_exists.yaml -o Global.infer_imgs="./images/PULC/car_exists/" +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +objects365_00001507.jpeg: class id(s): [1], score(s): [0.99], label_name(s): ['contains_car'] +objects365_00001521.jpeg: class id(s): [0], score(s): [0.99], label_name(s): ['no_car'] +``` + +其中,`contains_car` 表示该图里存在车,`no_car` 表示该图里不存在车。 + + + +### 6.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/PULC/PULC_language_classification.md b/docs/zh_CN/PULC/PULC_language_classification.md new file mode 100644 index 0000000000000000000000000000000000000000..309f3e9cc8a0c3c519722baeb13e5b90a8312e51 --- /dev/null +++ b/docs/zh_CN/PULC/PULC_language_classification.md @@ -0,0 +1,453 @@ +# PULC 语种分类模型 + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 SKL-UGI 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图片](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + +## 1. 
模型和应用场景介绍 + +该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的语种分类模型。使用该方法训练得到的模型可以快速判断图片中的文字语种,该模型可以广泛应用于金融、政务等各种涉及多语种OCR处理的场景中。 + +下表列出了语种分类模型的相关指标,前两行展现了使用 SwinTranformer_tiny 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第三行至第六行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + SKL-UGI 知识蒸馏策略训练得到的模型的相关指标。其中替换 backbone 为 PPLCNet_x1_0时,将数据预处理时的输入尺寸变为[192,48],且网络的下采样stride调整为[2, [2, 1], [2, 1], [2, 1], [2, 1]]。 + +| 模型 | 精度 | 延时 | 存储 | 策略 | +| ----------------------- | --------- | -------- | ------- | ---------------------------------------------- | +| SwinTranformer_tiny | 98.12 | 89.09 | 111 | 使用ImageNet预训练模型 | +| MobileNetV3_small_x0_35 | 95.92 | 2.98 | 3.7 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 98.35 | 2.58 | 7.1 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 98.7 | 2.58 | 7.1 | 使用SSLD预训练模型 | +| PPLCNet_x1_0 | 99.12 | 2.58 | 7.1 | 使用SSLD预训练模型+EDA策略 | +| **PPLCNet_x1_0** | **99.26** | **2.58** | **7.1** | 使用SSLD预训练模型+EDA策略+SKL-UGI知识蒸馏策略 | + +从表中可以看出,backbone 为 SwinTranformer_tiny 时精度比较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度提升明显,但精度有了大幅下降。将 backbone 替换为 PPLCNet_x1_0 且调整预处理输入尺寸和网络的下采样stride时,速度略为提升,同时精度较 MobileNetV3_large_x1_0 高2.43个百分点。在此基础上,使用 SSLD 预训练模型后,在不改变推理速度的前提下,精度可以提升 0.35 个百分点,进一步地,当融合EDA策略后,精度可以再提升 0.42 个百分点,最后,在使用 SKL-UGI 知识蒸馏后,精度可以继续提升 0.14 个百分点。此时,PPLCNet_x1_0 超过了 SwinTranformer_tiny 模型的精度,并且速度有了明显提升。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。 + +**备注:** + +* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。 + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。 + +* 使用命令行快速预测 + +```bash +paddleclas --model_name=language_classification --infer_imgs=pulc_demo_imgs/language_classification/word_35404.png +``` + +结果如下: +``` +>>> result +class_ids: [4, 6], scores: [0.88672, 0.01434], label_names: ['japan', 'korean'], filename: pulc_demo_imgs/language_classification/word_35404.png +Predict complete! +``` + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。 + + +* 在 Python 代码中预测 +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="language_classification") +result = model.predict(input_data="pulc_demo_imgs/language_classification/word_35404.png") +print(next(result)) +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="language_classification", batch_size=2)`, 使用默认的代码返回结果示例如下: + +``` +>>> result +[{'class_ids': [4, 6], 'scores': [0.88672, 0.01434], 'label_names': ['japan', 'korean'], 'filename': 'pulc_demo_imgs/language_classification/word_35404.png'}] +``` + + + + +## 3. 
模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +[第1节](#1)中提供的模型使用内部数据训练得到,该数据集暂时不方便公开。这里基于 [Multi-lingual scene text detection and recognition](https://rrc.cvc.uab.es/?ch=15&com=downloads) 开源数据集构造了一个多语种demo数据集,用于体验本案例的预测过程。 + +![](../../images/PULC/docs/language_classification_original_data.png) + + + +#### 3.2.2 数据集获取 + +[第1节](#1)中提供的模型共支持10个类别,分别为: + +`0` 表示阿拉伯语(arabic);`1` 表示中文繁体(chinese_cht);`2` 表示斯拉夫语(cyrillic);`3` 表示梵文(devanagari);`4` 表示日语(japan);`5` 表示卡纳达文(ka);`6` 表示韩语(korean);`7` 表示泰米尔文(ta);`8` 表示泰卢固文(te);`9` 表示拉丁语(latin)。 + +在 Multi-lingual scene text detection and recognition 数据集中,仅包含了阿拉伯语、日语、韩语和拉丁语数据,这里分别将 4 个语种的数据各抽取 1600 张作为本案例的训练数据,300 张作为测试数据,以及 400 张作为补充数据和训练数据混合用于本案例的`SKL-UGI知识蒸馏策略`实验。 + +因此,对于本案例中的demo数据集,类别为: + +`0` 表示阿拉伯语(arabic);`1` 表示日语(japan);`2` 表示韩语(korean);`3` 表示拉丁语(latin)。 + +如果想要制作自己的多语种数据集,可以按照需求收集并整理自己任务中需要语种的数据,此处提供了经过上述方法处理好的demo数据,可以直接下载得到。 + +**备注:** 语种分类任务中的图片数据需要将整图中的文字区域抠取出来,仅仅使用文本行部分作为图片数据。 + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压多语种场景的demo数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/language_classification.tar +tar -xf language_classification.tar +cd ../ +``` + +执行上述命令后,`dataset/`下存在`language_classification`目录,该目录中具有以下数据: + +``` +├── img +│ ├── word_1.png +│ ├── word_2.png +... +├── train_list.txt +├── train_list_for_distill.txt +├── test_list.txt +└── label_list.txt +``` + +其中`img/`存放了 4 种语言总计 9200 张数据。`train_list.txt`和`test_list.txt`分别为训练集和验证集的标签文件,`label_list.txt`是 4 类语言分类模型对应的类别列表,`SKL-UGI 知识蒸馏策略`对应的训练标签文件为`train_list_for_distill.txt`。用这些图片可以快速体验本案例中模型的训练预测过程。 + +***备注:*** + +- 这里的`label_list.txt`是4类语种分类模型对应的类别列表,如果自己构造的数据集语种类别发生变化,需要自行调整。 +- 如果想要自己构造训练集和验证集,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + +### 3.3 模型训练 + +在`ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml`中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \ + -o Arch.class_num=4 +``` + +- 由于本文档中的demo数据集的类别数量为 4,所以需要添加`-o Arch.class_num=4`来将模型的类别数量指定为4。 + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" \ + -o Arch.class_num=4 +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```bash +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" \ + -o Arch.class_num=4 +``` + +输出结果如下: + +``` +[{'class_ids': [4, 9], 'scores': [0.96809, 0.01001], 'file_name': 'deploy/images/PULC/language_classification/word_35404.png', 'label_names': ['japan', 'latin']}] +``` + +***备注:*** + +- 其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 +- 默认是对 `deploy/images/PULC/language_classification/word_35404.png` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 +- 预测输出为top2的预测结果,`japan` 表示该图中文字语种识别为日语,`latin` 表示该图中文字语种识别为拉丁语。 + + + +## 4. 
模型压缩 + + + +### 4.1 SKL-UGI 知识蒸馏 + +SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md)。 + + + +#### 4.1.1 教师模型训练 + +复用`ppcls/configs/PULC/language_classification/PPLCNet/PPLCNet_x1_0.yaml`中的超参数,训练教师模型,训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd \ + -o Arch.class_num=4 +``` + +当前教师模型最好的权重保存在`output/ResNet101_vd/best_model.pdparams`。 + +**备注:** 训练ResNet101_vd模型需要的显存较多,如果机器显存不够,可以将学习率和 batch size 同时缩小一定的倍数进行训练。 + + + +#### 4.1.2 蒸馏训练 + +配置文件`ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型,使用[3.2.2节](#3.2.2)中介绍的蒸馏数据作为新增的无标签数据。训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model \ + -o Arch.class_num=4 +``` + +当前模型最好的权重保存在`output/DistillationModel/best_model_student.pdparams`。 + + + +## 5. 超参搜索 + +在 [3.2 节](#3.2)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。 + +**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。 + + + +## 6. 模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +#### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_language_classification_infer +``` + +执行完该脚本后会在`deploy/models/`下生成`PPLCNet_x1_0_language_classification_infer`文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_language_classification_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在`output/PPLCNet_x1_0/best_model.pdparams`中。 + + + +#### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/language_classification_infer.tar && tar -xf language_classification_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── language_classification_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/language_classification/word_35404.png` 进行整图文字方向分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/language_classification/inference_language_classification.yaml +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c 
configs/PULC/language_classification/inference_language_classification.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +word_35404.png: class id(s): [4, 6], score(s): [0.89, 0.01], label_name(s): ['japan', 'korean'] +``` + +其中,输出为top2的预测结果,`japan` 表示该图中文字语种为日语,`korean` 表示该图中文字语种为韩语。 + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/language_classification/inference_language_classification.yaml -o Global.infer_imgs="./images/PULC/language_classification/" +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +word_17.png: class id(s): [9, 4], score(s): [0.80, 0.09], label_name(s): ['latin', 'japan'] +word_20.png: class id(s): [0, 4], score(s): [0.91, 0.02], label_name(s): ['arabic', 'japan'] +word_35404.png: class id(s): [4, 6], score(s): [0.89, 0.01], label_name(s): ['japan', 'korean'] +``` + +其中,输出为top2的预测结果,`japan` 表示该图中文字语种为日语,`latin` 表示该图中文字语种为拉丁语,`arabic` 表示该图中文字语种为阿拉伯语,`korean` 表示该图中文字语种为韩语。 + + + +### 6.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/PULC/PULC_model_list.md b/docs/zh_CN/PULC/PULC_model_list.md new file mode 100644 index 0000000000000000000000000000000000000000..4b2d7a8774d7d64a634a1bebc96481fc2ad076eb --- /dev/null +++ b/docs/zh_CN/PULC/PULC_model_list.md @@ -0,0 +1,25 @@ +# PULC 模型库 + +------ + +此处提供了 PULC 模型库的相关指标和模型的下载链接,其中预训练模型可以用来微调训练,推理模型可以直接用来预测和部署。 + + +|模型名称|模型简介|模型精度 |模型大小|推理耗时|下载地址| +| --- | --- | --- | --- | --- | --- | +| person_exists |[PULC有人/无人分类模型](PULC_person_exists.md)| 96.23 |7.0M|2.58ms|[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/person_exists_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/person_exists_pretrained.pdparams)| +| person_attribute |[PULC人体属性识别模型](PULC_person_attribute.md)| 78.59 |7.2M|2.01ms|[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/person_attribute_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/person_attribute_pretrained.pdparams)| +| safety_helmet |[PULC佩戴安全帽分类模型](PULC_safety_helmet.md)| 99.38 |7.1M|2.03ms|[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/safety_helmet_infer.tar) / 
[预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/safety_helmet_pretrained.pdparams)| +| traffic_sign |[PULC交通标志分类模型](PULC_traffic_sign.md)| 98.35 |8.2M|2.10ms|[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/traffic_sign_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/traffic_sign_pretrained.pdparams)| +| vehicle_attribute |[PULC车辆属性识别模型](PULC_vehicle_attribute.md)| 90.81 |7.2M|2.36ms|[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/vehicle_attribute_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/vehicle_attribute_pretrained.pdparams)| +| car_exists |[PULC有车/无车分类模型](PULC_car_exists.md) | 95.92 | 7.1M | 2.38ms |[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/car_exists_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/car_exists_pretrained.pdparams)| +| text_image_orientation |[PULC含文字图像方向分类模型](PULC_text_image_orientation.md)| 99.06 | 7.1M | 2.16ms |[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/text_image_orientation_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/text_image_orientation_pretrained.pdparams)| +| textline_orientation |[PULC文本行方向分类模型](PULC_textline_orientation.md)| 96.01 |7.0M|2.72ms|[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/textline_orientation_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/textline_orientation_pretrained.pdparams)| +| language_classification |[PULC语种分类模型](PULC_language_classification.md)| 99.26 |7.1M|2.58ms|[推理模型](https://paddleclas.bj.bcebos.com/models/PULC/inference/language_classification_infer.tar) / [预训练模型](https://paddleclas.bj.bcebos.com/models/PULC/pretrained/language_classification_pretrained.pdparams)| + + +**备注:** + +* 以上所有的模型的 backbone 均为 PPLCNet_x1_0,部分模型大小不同是由于分类的输出大小不同导致的,推理耗时是基于Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 测试得到,其中测试过程开启 MKLDNN 加速策略,线程数为10。速度测试过程会有轻微波动。 + +* person_exists、safety_helmet、car_exists 的评测指标为 TprAtFpr,person_attribute、vehicle_attribute的评测指标为ma、traffic_sign、text_image_orientation、textline_orientation、language_classification的评测指标为Top-1 Acc。 diff --git a/docs/zh_CN/PULC/PULC_person_attribute.md b/docs/zh_CN/PULC/PULC_person_attribute.md new file mode 100644 index 0000000000000000000000000000000000000000..b6d833a9df9898b9e2d01746e520b0c386376e8d --- /dev/null +++ b/docs/zh_CN/PULC/PULC_person_attribute.md @@ -0,0 +1,453 @@ +# PULC 人体属性识别模型 + +------ + + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 SKL-UGI 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图像](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + + +## 1. 
模型和应用场景介绍 + +该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的人体属性识别模型。该模型可以广泛应用于行人分析、行人跟踪等场景。 + +下表列出了不同人体属性识别模型的相关指标,前三行展现了使用 SwinTransformer_tiny、Res2Net200_vd_26w_4s 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第四行至第七行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + SKL-UGI 知识蒸馏策略训练得到的模型的相关指标。 + + +| 模型 | mA(%) | 延时(ms) | 存储(M) | 策略 | +|-------|-----------|----------|---------------|---------------| +| Res2Net200_vd_26w_4s | 81.25 | 77.51 | 293 | 使用ImageNet预训练模型 | +| SwinTransformer_tiny | 80.17 | 89.51 | 111 | 使用ImageNet预训练模型 | +| MobileNetV3_small_x0_35 | 70.79 | 2.90 | 1.7 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 76.31 | 2.01 | 7.1 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 77.31 | 2.01 | 7.1 | 使用SSLD预训练模型 | +| PPLCNet_x1_0 | 77.71 | 2.01 | 7.1 | 使用SSLD预训练模型+EDA策略| +| PPLCNet_x1_0 | 78.59 | 2.01 | 7.1 | 使用SSLD预训练模型+EDA策略+SKL-UGI知识蒸馏策略| + +从表中可以看出,backbone 为 Res2Net200_vd_26w_4s 和 SwinTransformer_tiny 时精度较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度可以大幅提升,但是精度也大幅下降。将 backbone 替换为 PPLCNet_x1_0 时,精度较 MobileNetV3_small_x0_35 高 5.5%,于此同时,速度更快。在此基础上,使用 SSLD 预训练模型后,在不改变推理速度的前提下,精度可以提升 1%,进一步地,当融合EDA策略后,精度可以再提升 0.4%,最后,在使用 SKL-UGI 知识蒸馏后,精度可以继续提升 0.88%。此时,PPLCNet_x1_0 的精度与 SwinTransformer_tiny 仅相差1.58%,但是速度快 44 倍。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。 + +**备注:** + +* 延时是基于 Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 测试得到,开启 MKLDNN 加速策略,线程数为10。 +* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。 + + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。 + +* 使用命令行快速预测 + +```bash +paddleclas --model_name=person_attribute --infer_imgs=pulc_demo_imgs/person_attribute/090004.jpg +``` + +结果如下: +``` +>>> result +attributes: ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], output: [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1], filename: pulc_demo_imgs/person_attribute/090004.jpg +Predict complete! 
+``` + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。 + + +* 在 Python 代码中预测 +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="person_attribute") +result = model.predict(input_data="pulc_demo_imgs/person_attribute/090004.jpg") +print(next(result)) +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="person_attribute", batch_size=2)`, 使用默认的代码返回结果示例如下: + +``` +>>> result +[{'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1], 'filename': 'pulc_demo_imgs/person_attribute/090004.jpg'}] +``` + + + +## 3. 模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +本案例中所使用的数据为[pa100k 数据集](https://www.v7labs.com/open-datasets/pa-100k)。 + + + +#### 3.2.2 数据集获取 + +部分数据可视化如下所示。 + +
+ +
+ + +我们将原始数据转换成了 PaddleClas 多标签可读的数据格式,可以直接下载。 + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压有人/无人场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/pa100k.tar +tar -xf pa100k.tar +cd ../ +``` + +执行上述命令后,`dataset/` 下存在 `pa100k` 目录,该目录中具有以下数据: + + +执行上述命令后,`pa100k`目录中具有以下数据: + +``` +pa100k +├── train +│   ├── 000001.jpg +│   ├── 000002.jpg +... +├── val +│   ├── 080001.jpg +│   ├── 080002.jpg +... +├── test +│   ├── 090001.jpg +│   ├── 090002.jpg +... +... +├── train_list.txt +├── train_val_list.txt +├── val_list.txt +├── test_list.txt +``` + +其中`train/`、`val/`、`test/`分别为训练集、验证集和测试集。`train_list.txt`、`val_list.txt`、`test_list.txt`分别为训练集、验证集、测试集的标签文件。在本例子中,`test_list.txt`暂时没有使用。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml` 中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml +``` + +验证集的最佳指标在 `77.71%` 左右(数据集较小,一般有0.3%左右的波动)。 + + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```bash +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +输出结果如下: + +``` +[{'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `deploy/images/PULC/person_attribute/090004.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + + + +## 4. 模型压缩 + + + +### 4.1 SKL-UGI 知识蒸馏 + +SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md)。 + + + +#### 4.1.1 教师模型训练 + +复用 `ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml` 中的超参数,训练教师模型,训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd +``` + +验证集的最佳指标为 `80.10%` 左右,当前教师模型最好的权重保存在 `output/ResNet101_vd/best_model.pdparams`。 + + + +#### 4.1.2 蒸馏训练 + +配置文件`ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型。训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model +``` + +验证集的最佳指标为 `78.5%` 左右,当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。 + + + + +## 5. 
超参搜索 + +在 [3.2 节](#3.2)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。 + +**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。 + + + +## 6. 模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_person_attribute_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_person_attribute_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_person_attribute_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在`output/PPLCNet_x1_0/best_model.pdparams`中。 + + + +### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/person_attribute_infer.tar && tar -xf person_attribute_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── person_attribute_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/person_attribute/090004.jpg` 进行行人属性识别。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/person_attribute/inference_person_attribute.yaml -o Global.use_gpu=True +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/person_attribute/inference_person_attribute.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +090004.jpg: {'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]} +``` + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/person_attribute/inference_person_attribute.yaml -o Global.infer_imgs="./images/PULC/person_attribute/" +``` + +终端中会输出该文件夹内所有图像的属性识别结果,如下所示。 + +``` +090004.jpg: {'attributes': ['Male', 'Age18-60', 'Back', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'Backpack', 'Upper: LongSleeve UpperPlaid', 'Lower: Trousers', 'No boots'], 'output': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]} +090007.jpg: {'attributes': ['Female', 'Age18-60', 'Side', 'Glasses: False', 'Hat: False', 'HoldObjectsInFront: False', 'No bag', 'Upper: ShortSleeve', 'Lower: Skirt&Dress', 'No boots'], 'output': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0]} +``` + + + +### 6.3 基于 C++ 
预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/PULC/PULC_person_exists.md b/docs/zh_CN/PULC/PULC_person_exists.md new file mode 100644 index 0000000000000000000000000000000000000000..b3b830a893a4648645beab3a447ec8d894a5da4c --- /dev/null +++ b/docs/zh_CN/PULC/PULC_person_exists.md @@ -0,0 +1,472 @@ +# PULC 有人/无人分类模型 + +------ + + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 SKL-UGI 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图像](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + + +## 1. 
模型和应用场景介绍 + +该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的有人/无人的分类模型。该模型可以广泛应用于如监控场景、人员进出管控场景、海量数据过滤场景等。 + +下表列出了判断图片中是否有人的二分类模型的相关指标,前两行展现了使用 SwinTranformer_tiny 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第三行至第六行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + SKL-UGI 知识蒸馏策略训练得到的模型的相关指标。 + + +| 模型 | Tpr(%) | 延时(ms) | 存储(M) | 策略 | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 95.69 | 95.30 | 111 | 使用 ImageNet 预训练模型 | +| MobileNetV3_small_x0_35 | 68.25 | 2.85 | 2.6 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 89.57 | 2.12 | 7.0 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 92.10 | 2.12 | 7.0 | 使用 SSLD 预训练模型 | +| PPLCNet_x1_0 | 93.43 | 2.12 | 7.0 | 使用 SSLD 预训练模型+EDA 策略| +| PPLCNet_x1_0 | 96.23 | 2.12 | 7.0 | 使用 SSLD 预训练模型+EDA 策略+SKL-UGI 知识蒸馏策略| + +从表中可以看出,backbone 为 SwinTranformer_tiny 时精度较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度可以大幅提升,但是会导致精度大幅下降。将 backbone 替换为速度更快的 PPLCNet_x1_0 时,精度较 MobileNetV3_small_x0_35 高 20 多个百分点,与此同时速度依旧可以快 20% 以上。在此基础上,使用 SSLD 预训练模型后,在不改变推理速度的前提下,精度可以提升约 2.6 个百分点,进一步地,当融合EDA策略后,精度可以再提升 1.3 个百分点,最后,在使用 SKL-UGI 知识蒸馏后,精度可以继续提升 2.8 个百分点。此时,PPLCNet_x1_0 达到了 SwinTranformer_tiny 模型的精度,但是速度快 40 多倍。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。 + +**备注:** + +* `Tpr`指标的介绍可以参考 [3.2 小节](#3.2)的备注部分,延时是基于 Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 测试得到,开启 MKLDNN 加速策略,线程数为10。 +* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。 + + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。 + +* 使用命令行快速预测 + +```bash +paddleclas --model_name=person_exists --infer_imgs=pulc_demo_imgs/person_exists/objects365_01780782.jpg +``` + +结果如下: +``` +>>> result +class_ids: [0], scores: [0.9955421453341842], label_names: ['nobody'], filename: pulc_demo_imgs/person_exists/objects365_01780782.jpg +Predict complete! +``` + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。 + + +* 在 Python 代码中预测 +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="person_exists") +result = model.predict(input_data="pulc_demo_imgs/person_exists/objects365_01780782.jpg") +print(next(result)) +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="person_exists", batch_size=2)`, 使用默认的代码返回结果示例如下: + +``` +>>> result +[{'class_ids': [0], 'scores': [0.9955421453341842], 'label_names': ['nobody'], 'filename': 'pulc_demo_imgs/person_exists/objects365_01780782.jpg'}] +``` + + + +## 3. 
模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档[环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +本案例中所使用的所有数据集均为开源数据,`train` 集合为[MS-COCO 数据](https://cocodataset.org/#overview)的训练集的子集,`val` 集合为[Object365 数据](https://www.objects365.org/overview.html)的训练集的子集,`ImageNet_val` 为[ImageNet-1k 数据](https://www.image-net.org/)的验证集。 + + + +#### 3.2.2 数据集获取 + +在公开数据集的基础上经过后处理即可得到本案例需要的数据,具体处理方法如下: + +- 训练集合,本案例处理了 MS-COCO 数据训练集的标注文件,如果某张图含有“人”的标签,且这个框的面积在整张图中的比例大于 10%,即认为该张图中含有人,如果某张图中没有“人”的标签,则认为该张图中不含有人。经过处理后,得到 92964 条可用数据,其中有人的数据有 39813 条,无人的数据 53151 条。 + +- 验证集合,从 Object365 数据中随机抽取一小部分数据,使用在 MS-COCO 上训练得到的较好的模型预测这些数据,将预测结果和数据的标注文件取交集,将交集的结果按照得到训练集的方法筛选出验证集合。经过处理后,得到 27820 条可用数据。其中有人的数据有 2255 条,无人的数据有 25565 条。 + +处理后的数据集部分数据可视化如下: + +![](../../images/PULC/docs/person_exists_data_demo.png) + +此处提供了经过上述方法处理好的数据,可以直接下载得到。 + + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压有人/无人场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/person_exists.tar +tar -xf person_exists.tar +cd ../ +``` + +执行上述命令后,`dataset/` 下存在 `person_exists` 目录,该目录中具有以下数据: + +``` + +├── train +│   ├── 000000000009.jpg +│   ├── 000000000025.jpg +... +├── val +│   ├── objects365_01780637.jpg +│   ├── objects365_01780640.jpg +... +├── ImageNet_val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +... +├── train_list.txt +├── train_list.txt.debug +├── train_list_for_distill.txt +├── val_list.txt +└── val_list.txt.debug +``` + +其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件,`train_list.txt.debug` 和 `val_list.txt.debug` 分别为训练集和验证集的 `debug` 标签文件,其分别是 `train_list.txt` 和 `val_list.txt` 的子集,用该文件可以快速体验本案例的流程。`ImageNet_val/` 是 ImageNet-1k 的验证集,该集合和 `train` 集合的混合数据用于本案例的 `SKL-UGI知识蒸馏策略`,对应的训练标签文件为 `train_list_for_distill.txt` 。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考 [PaddleClas 分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + +* 关于如何得到蒸馏的标签文件可以参考[知识蒸馏标签获得方法](../advanced_tutorials/ssld.md#3.2)。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml` 中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml +``` + +验证集的最佳指标在 `0.94-0.95` 之间(数据集较小,容易造成波动)。 + +**备注:** + +* 此时使用的指标为Tpr,该指标描述了在假正类率(Fpr)小于某一个指标时的真正类率(Tpr),是产业中二分类问题常用的指标之一。在本案例中,Fpr 为千分之一。关于 Fpr 和 Tpr 的更多介绍,可以参考[这里](https://baike.baidu.com/item/AUC/19282953)。 + +* 在eval时,会打印出来当前最佳的 TprAtFpr 指标,具体地,其会打印当前的 `Fpr`、`Tpr` 值,以及当前的 `threshold`值,`Tpr` 值反映了在当前 `Fpr` 值下的召回率,该值越高,代表模型越好。`threshold` 表示当前最佳 `Fpr` 所对应的分类阈值,可用于后续模型部署落地等。 + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [1], 'scores': [0.9999976], 'label_names': ['someone'], 'file_name': 'deploy/images/PULC/person_exists/objects365_02035329.jpg'}] +``` + +**备注:** + +* 
这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。
+
+* 默认是对 `deploy/images/PULC/person_exists/objects365_02035329.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。
+
+* 二分类默认的阈值为0.5, 如果需要指定阈值,可以重写 `Infer.PostProcess.threshold`,如 `-o Infer.PostProcess.threshold=0.9794`,该值需要根据实际场景来确定,此处的 `0.9794` 是在该场景中的 `val` 数据集在千分之一 Fpr 下得到的最佳 Tpr 所得到的。
+
+
+<a name="4"></a>
+
+## 4. 模型压缩
+
+<a name="4.1"></a>
+
+### 4.1 SKL-UGI 知识蒸馏
+
+SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md)。
+
+<a name="4.1.1"></a>
+
+#### 4.1.1 教师模型训练
+
+复用 `ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml` 中的超参数,训练教师模型,训练脚本如下:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd
+```
+
+验证集的最佳指标在 `0.96-0.98` 之间,当前教师模型最好的权重保存在 `output/ResNet101_vd/best_model.pdparams`。
+
+<a name="4.1.2"></a>
+
+#### 4.1.2 蒸馏训练
+
+配置文件 `ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml` 提供了 `SKL-UGI知识蒸馏策略` 的配置。该配置将 `ResNet101_vd` 当作教师模型,`PPLCNet_x1_0` 当作学生模型,使用 ImageNet 数据集的验证集作为新增的无标签数据。训练脚本如下:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+验证集的最佳指标在 `0.95-0.97` 之间,当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。
+
+
+<a name="5"></a>
+
+## 5. 超参搜索
+
+在 [3.3 节](#3.3)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。
+
+**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。
+
+
+<a name="6"></a>
+
+## 6. 
模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用 MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于 Paddle Inference 推理引擎的介绍,可以参考 [Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_person_exists_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_person_exists_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_person_exists_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在`output/PPLCNet_x1_0/best_model.pdparams`中。 + + + +### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/person_exists_infer.tar && tar -xf person_exists_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── person_exists_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/person_exists/objects365_02035329.jpg` 进行有人/无人分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/person_exists/inference_person_exists.yaml +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/person_exists/inference_person_exists.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone'] +``` + + +**备注:** 二分类默认的阈值为0.5, 如果需要指定阈值,可以重写 `Infer.PostProcess.threshold` ,如`-o Infer.PostProcess.threshold=0.9794`,该值需要根据实际场景来确定,此处的 `0.9794` 是在该场景中的 `val` 数据集在千分之一 Fpr 下得到的最佳 Tpr 所得到的。该阈值的确定方法可以参考[3.3节](#3.3)备注部分。 + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/person_exists/inference_person_exists.yaml -o Global.infer_imgs="./images/PULC/person_exists/" +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +objects365_01780782.jpg: class id(s): [0], score(s): [1.00], label_name(s): ['nobody'] +objects365_02035329.jpg: class id(s): [1], score(s): [1.00], label_name(s): ['someone'] +``` + +其中,`someone` 表示该图里存在人,`nobody` 表示该图里不存在人。 + + + +### 6.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 
端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/PULC/PULC_quickstart.md b/docs/zh_CN/PULC/PULC_quickstart.md new file mode 100644 index 0000000000000000000000000000000000000000..c7c6980625d6325bddbd5a6fed619147534c43b7 --- /dev/null +++ b/docs/zh_CN/PULC/PULC_quickstart.md @@ -0,0 +1,125 @@ +# PULC 快速体验 + +------ + +本文主要介绍通过 PaddleClas whl 包,使用 PULC 系列模型进行预测。 + +## 目录 + +- [1. 安装](#1) + - [1.1 安装PaddlePaddle](#11) + - [1.2 安装PaddleClas whl包](#12) +- [2. 快速体验](#2) + - [2.1 命令行使用](#2.1) + - [2.2 Python脚本使用](#2.2) + - [2.3 模型列表](#2.3) +- [3.小结](#3) + + + +## 1. 安装 + + + +### 1.1 安装 PaddlePaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 1.2 安装 PaddleClas whl 包 + +```bash +pip3 install paddleclas +``` + + + +## 2. 快速体验 + +PaddleClas 提供了一系列测试图片,里边包含人、车、OCR等方向的多个场景的demo数据。点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载并解压,然后在终端中切换到相应目录。 + + + +### 2.1 命令行使用 + +``` +cd /path/to/pulc_demo_imgs +``` + +使用命令行预测: + +```bash +paddleclas --model_name=person_exists --infer_imgs=pulc_demo_imgs/person_exists/objects365_01780782.jpg +``` + +结果如下: +``` +>>> result +class_ids: [0], scores: [0.9955421453341842], label_names: ['nobody'], filename: pulc_demo_imgs/person_exists/objects365_01780782.jpg +Predict complete! 
+``` + +若预测结果为 `nobody`,表示该图中没有人,若预测结果为 `someone`,则表示该图中有人。此处预测结果为 `nobody`,表示该图中没有人。 + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹,如需要替换模型,更改 `--model_name` 中的模型名字即可,模型名字可以参考[2.3 模型列表](#2.3)。 + + + +### 2.2 Python 脚本使用 + +此处提供了在 python 脚本中使用 PULC 有人/无人分类模型预测的例子。 + +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="person_exists") +result = model.predict(input_data="pulc_demo_imgs/person_exists/objects365_01780782.jpg") +print(next(result)) +``` + +打印的结果如下: + +``` +>>> result +[{'class_ids': [0], 'scores': [0.9955421453341842], 'label_names': ['nobody'], 'filename': 'pulc_demo_imgs/person_exists/objects365_01780782.jpg'}] +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="person_exists", batch_size=2)`。更换其他模型只需要替换`model_name`, `model_name`,可以参考[2.3 模型列表](#2.3)。 + + + +### 2.3 模型列表 + +PULC 系列模型的名称和简介如下: + +|模型名称|模型简介| +| --- | --- | +| person_exists | PULC有人/无人分类模型 | +| person_attribute | PULC人体属性识别模型 | +| safety_helmet | PULC佩戴安全帽分类模型 | +| traffic_sign | PULC交通标志分类模型 | +| vehicle_attribute | PULC车辆属性识别模型 | +| car_exists | PULC有车/无车分类模型 | +| text_image_orientation | PULC含文字图像方向分类模型 | +| textline_orientation | PULC文本行方向分类模型 | +| language_classification | PULC语种分类模型 | + + + +## 3. 小结 + +通过本节内容,相信您已经熟练掌握 PaddleClas whl 包的 PULC 模型使用方法并获得了初步效果。 + +PULC 方法产出的系列模型在人、车、OCR等方向的多个场景中均验证有效,用超轻量模型就可实现与 SwinTransformer 模型接近的精度,预测速度提高 40+ 倍。并且打通数据、模型训练、压缩和推理部署全流程,具体地,您可以参考[PULC有人/无人分类模型](PULC_person_exists.md)、[PULC人体属性识别模型](PULC_person_attribute.md)、[PULC佩戴安全帽分类模型](PULC_safety_helmet.md)、[PULC交通标志分类模型](PULC_traffic_sign.md)、[PULC车辆属性识别模型](PULC_vehicle_attribute.md)、[PULC有车/无车分类模型](PULC_car_exists.md)、[PULC含文字图像方向分类模型](PULC_text_image_orientation.md)、[PULC文本行方向分类模型](PULC_textline_orientation.md)、[PULC语种分类模型](PULC_language_classification.md)。 diff --git a/docs/zh_CN/PULC/PULC_safety_helmet.md b/docs/zh_CN/PULC/PULC_safety_helmet.md new file mode 100644 index 0000000000000000000000000000000000000000..0467b61b12c629ebc7a6e2a2268b4c82fe512abe --- /dev/null +++ b/docs/zh_CN/PULC/PULC_safety_helmet.md @@ -0,0 +1,438 @@ +# PULC 佩戴安全帽分类模型 + +------ + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 UDML 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图像](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + + +## 1. 
模型和应用场景介绍 + +该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的“是否佩戴安全帽”的二分类模型。该模型可以广泛应用于如建筑施工场景、工厂车间场景、交通场景等。 + +下表列出了判断图片中是否佩戴安全帽的二分类模型的相关指标,前三行展现了使用 Res2Net200_vd_26w_4s,SwinTranformer_tiny 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第四行至第七行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + UDML 知识蒸馏策略训练得到的模型的相关指标。 + +| 模型 | Tpr(%) | 延时(ms) | 存储(M) | 策略 | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 93.57 | 91.32 | 111 | 使用ImageNet预训练模型 | +| Res2Net200_vd_26w_4s | 98.92 | 80.99 | 284 | 使用ImageNet预训练模型 | +| MobileNetV3_small_x0_35 | 84.83 | 2.85 | 2.6 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 93.27 | 2.03 | 7.1 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 98.16 | 2.03 | 7.1 | 使用SSLD预训练模型 | +| PPLCNet_x1_0 | 99.30 | 2.03 | 7.1 | 使用SSLD预训练模型+EDA策略| +| PPLCNet_x1_0 | 99.38 | 2.03 | 7.1 | 使用SSLD预训练模型+EDA策略+UDML知识蒸馏策略| + +从表中可以看出,在使用服务器端大模型作为 backbone 时,SwinTranformer_tiny 精度较低,Res2Net200_vd_26w_4s 精度较高,但服务器端大模型推理速度普遍较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度可以大幅提升,但是精度显著降低。在将 backbone 替换为 PPLCNet_x1_0 后,精度较 MobileNetV3_small_x0_35 提高约 8.5 个百分点,与此同时速度快 20% 以上。在此基础上,将 PPLCNet_x1_0 的预训练模型替换为 SSLD 预训练模型后,在对推理速度无影响的前提下,精度提升约 4.9 个百分点,进一步地使用 EDA 策略后,精度可以再提升 1.1 个百分点。此时,PPLCNet_x1_0 已经超过 Res2Net200_vd_26w_4s 模型的精度,但是速度快 70+ 倍。最后,在使用 UDML 知识蒸馏后,精度可以再提升 0.08 个百分点。下面详细介绍关于 PULC 安全帽模型的训练方法和推理部署方法。 + +**备注:** + +* `Tpr`指标的介绍可以参考 [3.3小节](#3.3)的备注部分,延时是基于 Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 测试得到,开启MKLDNN加速策略,线程数为10。 + +* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。 + + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。 + +* 使用命令行快速预测 + +```bash +paddleclas --model_name=safety_helmet --infer_imgs=pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png +``` + +结果如下: +``` +>>> result +class_ids: [1], scores: [0.9986255], label_names: ['unwearing_helmet'], filename: pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png +Predict complete! +``` + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。 + + +* 在 Python 代码中预测 +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="safety_helmet") +result = model.predict(input_data="pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png") +print(next(result)) +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="safety_helmet", batch_size=2)`, 使用默认的代码返回结果示例如下: + +``` +>>> result +[{'class_ids': [1], 'scores': [0.9986255], 'label_names': ['unwearing_helmet'], 'filename': 'pulc_demo_imgs/safety_helmet/safety_helmet_test_1.png'}] +``` + + + +## 3. 
模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +本案例中所使用的所有数据集均为开源数据,数据集基于[Safety-Helmet-Wearing-Dataset](https://github.com/njvisionpower/Safety-Helmet-Wearing-Dataset)、[hard-hat-detection](https://www.kaggle.com/datasets/andrewmvd/hard-hat-detection)与[Large-scale CelebFaces Attributes (CelebA) Dataset](https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html)处理整合而来。 + + + +#### 3.2.2 数据集获取 + +在公开数据集的基础上经过后处理即可得到本案例需要的数据,具体处理方法如下: + +* 对于 Safety-Helmet-Wearing-Dataset 数据集:根据 bbox 标签数据,对其宽、高放大 3 倍作为 bbox 对图像进行裁剪,其中带有安全帽的图像类别为0,不戴安全帽的图像类别为1; +* 对于 hard-hat-detection 数据集:仅使用其中类别标签为 “hat” 的图像,并使用 bbox 标签进行裁剪,图像类别为0; +* 对于 CelebA 数据集:仅使用其中类别标签为 “Wearing_Hat” 的图像,并使用 bbox 标签进行裁剪,图像类别为0。 + +在整合上述数据后,可得到共约 15 万数据,其中戴安全帽与不戴安全帽的图像数量分别约为 2.8 万与 12.1 万,然后在两个类别上分别随机选取 0.56 万张图像作为测试集,共约 1.12 万张图像,其他约 13.8 万张图像作为训练集。 + +处理后的数据集部分数据可视化如下: + +![](../../images/PULC/docs/safety_helmet_data_demo.jpg) + +此处提供了经过上述方法处理好的数据,可以直接下载得到。 + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压安全帽场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/safety_helmet.tar +tar -xf safety_helmet.tar +cd ../ +``` + +执行上述命令后,`dataset/` 下存在 `safety_helmet` 目录,该目录中具有以下数据: + +``` +├── images +│   ├── VOC2028_part2_001209_1.jpg +│   ├── HHD_hard_hat_workers23_1.jpg +│   ├── CelebA_077809.jpg +│   ├── ... +│   └── ... +├── train_list.txt +└── val_list.txt +``` + +其中,`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件,所有的图像数据在 `images/` 目录下。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + +### 3.3 模型训练 + +在 `ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml` 中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml +``` + +验证集的最佳指标在 `0.985-0.993` 之间(数据集较小,容易造成波动)。 + +**备注:** + +* 此时使用的指标为Tpr,该指标描述了在假正类率(Fpr)小于某一个指标时的真正类率(Tpr),是产业中二分类问题常用的指标之一。在本案例中,Fpr 为万分之一。关于 Fpr 和 Tpr 的更多介绍,可以参考[这里](https://baike.baidu.com/item/AUC/19282953)。 + +* 在eval时,会打印出来当前最佳的 TprAtFpr 指标,具体地,其会打印当前的 `Fpr`、`Tpr` 值,以及当前的 `threshold`值,`Tpr` 值反映了在当前 `Fpr` 值下的召回率,该值越高,代表模型越好。`threshold` 表示当前最佳 `Fpr` 所对应的分类阈值,可用于后续模型部署落地等。 + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了训练过程中的最佳参数权重文件所在的路径,如需指定其他权重文件,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [1], 'scores': [0.9524797], 'label_names': ['unwearing_helmet'], 'file_name': 'deploy/images/PULC/safety_helmet/safety_helmet_test_1.png'}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `deploy/images/PULC/safety_helmet/safety_helmet_test_1.png` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + +* 二分类默认的阈值为0.5, 如果需要指定阈值,可以重写 `Infer.PostProcess.threshold` ,如 `-o Infer.PostProcess.threshold=0.9167`,该值需要根据实际应用场景来确定,在 safety_helmet 数据集的 
val 验证集上,在万分之一 Fpr 下得到的最佳 Tpr 时,该值为 0.9167。 + + + +## 4. 模型压缩 + + + +### 4.1 UDML 知识蒸馏 + +UDML 知识蒸馏是一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[UDML 知识蒸馏](../advanced_tutorials/knowledge_distillation.md#1.2.3)。 + + + +#### 4.1.1 蒸馏训练 + +配置文件 `ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml` 提供了 `UDML知识蒸馏策略` 的配置。训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml +``` + +验证集的最佳指标为 `0.990-0.993` 之间,当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。 + + + +## 5. 超参搜索 + +在 [3.2 节](#3.2)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。 + +**备注**:此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。 + + + +## 6. 模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference 可使用 MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于 Paddle Inference 推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_safety_helmet_infer +``` + +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_safety_helmet_infer` 目录,该目录下有如下文件结构: + +``` +├── PPLCNet_x1_0_safety_helmet_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在 `output/PPLCNet_x1_0/best_model.pdparams` 中。 + + + +### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/safety_helmet_infer.tar && tar -xf safety_helmet_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── safety_helmet_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/safety_helmet/safety_helmet_test_1.png` 进行是否佩戴安全帽分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/safety_helmet/inference_safety_helmet.yaml +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/safety_helmet/inference_safety_helmet.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +safety_helmet_test_1.png: class id(s): [1], score(s): [1.00], label_name(s): ['unwearing_helmet'] +``` + +**备注:** 二分类默认的阈值为0.5, 如果需要指定阈值,可以重写 `Infer.PostProcess.threshold` ,如 `-o Infer.PostProcess.threshold=0.9167`,该值需要根据实际应用场景来确定,在 safety_helmet 数据集的 val 验证集上,在万分之一 Fpr 下得到的最佳 Tpr 时,该值为 0.9167。该阈值的确定方法可以参考[3.3节](#3.3)备注部分。 + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/safety_helmet/inference_safety_helmet.yaml -o Global.infer_imgs="./images/PULC/safety_helmet/" +``` + 
+终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +safety_helmet_test_1.png: class id(s): [1], score(s): [1.00], label_name(s): ['unwearing_helmet'] +safety_helmet_test_2.png: class id(s): [0], score(s): [1.00], label_name(s): ['wearing_helmet'] +``` + +其中,`wearing_helmet` 表示该图中的人佩戴了安全帽,`unwearing_helmet` 表示该图中的人未佩戴安全帽。 + + + +### 6.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/PULC/PULC_text_image_orientation.md b/docs/zh_CN/PULC/PULC_text_image_orientation.md new file mode 100644 index 0000000000000000000000000000000000000000..d89396f0a0c4a67dd0990bd4e19725684b894020 --- /dev/null +++ b/docs/zh_CN/PULC/PULC_text_image_orientation.md @@ -0,0 +1,460 @@ +# PULC 含文字图像方向分类模型 + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 SKL-UGI 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图片](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + +## 1. 
模型和应用场景介绍
+
+在诸如文档扫描、证照拍摄等过程中,有时为了拍摄更清晰,会将拍摄设备进行旋转,导致得到的图片也是不同方向的。此时,标准的OCR流程无法很好地应对这些数据。利用图像分类技术,可以预先判断含文字图像的方向,并将其进行方向调整,从而提高OCR处理的准确性。该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的含文字图像方向的分类模型。该模型可以广泛应用于金融、政务等行业的旋转图片的OCR处理场景中。
+
+下表列出了含文字图像方向分类模型的相关指标,前两行展现了使用 SwinTransformer_tiny 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第三行至第五行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用EDA策略训练得到的模型的相关指标。
+
+| 模型 | 精度(%) | 延时(ms) | 存储(M) | 策略 |
+| ----------------------- | --------- | ---------- | --------- | -------------------------- |
+| SwinTransformer_tiny | 99.12 | 89.65 | 111 | 使用ImageNet预训练模型 |
+| MobileNetV3_small_x0_35 | 83.61 | 2.95 | 2.6 | 使用ImageNet预训练模型 |
+| PPLCNet_x1_0 | 97.85 | 2.16 | 7.1 | 使用ImageNet预训练模型 |
+| PPLCNet_x1_0 | 99.02 | 2.16 | 7.1 | 使用SSLD预训练模型 |
+| **PPLCNet_x1_0** | **99.06** | **2.16** | **7.1** | 使用SSLD预训练模型+EDA策略 |
+
+从表中可以看出,backbone 为 SwinTransformer_tiny 时精度比较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度提升明显,但精度有了大幅下降。将 backbone 替换为 PPLCNet_x1_0 时,速度也有所提升,同时精度较 MobileNetV3_small_x0_35 高了 14.24 个百分点。在此基础上,使用 SSLD 预训练模型后,在不改变推理速度的前提下,精度可以提升 1.17 个百分点;进一步地,使用 EDA 策略后,精度可以再提升 0.04 个百分点。此时,PPLCNet_x1_0 与 SwinTransformer_tiny 的精度差别不大,但是速度明显变快。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。
+
+**备注:**
+
+* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。
+
+
+<a name="2"></a>
+
+## 2. 模型快速体验
+
+<a name="2.1"></a>
+
+### 2.1 安装 paddlepaddle
+
+- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装
+
+```bash
+python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
+```
+
+- 您的机器是CPU,请运行以下命令安装
+
+```bash
+python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+```
+
+更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。
+
+<a name="2.2"></a>
+
+### 2.2 安装 paddleclas
+
+使用如下命令快速安装 paddleclas
+
+```
+pip3 install paddleclas
+```
+
+<a name="2.3"></a>
+
+### 2.3 预测
+
+点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。
+
+* 使用命令行快速预测
+
+```bash
+paddleclas --model_name=text_image_orientation --infer_imgs=pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg
+```
+
+结果如下:
+```
+>>> result
+class_ids: [0, 2], scores: [0.85615, 0.05046], label_names: ['0', '180'], filename: pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg
+Predict complete!
+```
+
+**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。
+
+
+* 在 Python 代码中预测
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="text_image_orientation")
+result = model.predict(input_data="pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg")
+print(next(result))
+```
+
+**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="text_image_orientation", batch_size=2)`, 使用默认的代码返回结果示例如下:
+
+```
+>>> result
+[{'class_ids': [0, 2], 'scores': [0.85615, 0.05046], 'label_names': ['0', '180'], 'filename': 'pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg'}]
+```
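+
+预测结果中的 `label_names` 按置信度从高到低排列,其数值即为图像被顺时针旋转的角度(`0`、`90`、`180`、`270`,详见下文 3.5 节的备注)。下面给出一段示意代码(其中图像路径与保存路径均为示例值),演示如何根据 top-1 预测结果将图像转正,以便后续接入 OCR 流程:
+
+```python
+import paddleclas
+from PIL import Image
+
+img_path = "pulc_demo_imgs/text_image_orientation/img_rot0_demo.jpg"
+
+model = paddleclas.PaddleClas(model_name="text_image_orientation")
+# predict() 返回 generator,next() 取出的是一个 batch 的结果列表
+res = next(model.predict(input_data=img_path))[0]
+
+# top-1 标签即为预测的顺时针旋转角度
+angle = int(res["label_names"][0])
+
+# PIL 的 rotate 为逆时针旋转,因此按预测角度旋转即可将图像转正
+Image.open(img_path).rotate(angle, expand=True).save("img_corrected.jpg")
+```
+
+
+<a name="3"></a>
+
+## 3. 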
模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +[第1节](#1)中提供的模型使用内部数据训练得到,该数据集暂时不方便公开。这里基于 [ICDAR2019-ArT](https://ai.baidu.com/broad/introduction?dataset=art)、 [XFUND](https://github.com/doc-analysis/XFUND) 和 [ICDAR2015](https://rrc.cvc.uab.es/?ch=4&com=introduction) 三个公开数据集构造了一个小规模含文字图像方向分类数据集,用于体验本案例。 + +![](../../images/PULC/docs/text_image_orientation_original_data.png) + + + +#### 3.2.2 数据集获取 + +在公开数据集的基础上经过后处理即可得到本案例需要的数据,具体处理方法如下: + +考虑到原始图片的分辨率较高,模型训练时间较长,这里将所有数据预先进行了缩放处理,在保持长宽比不变的前提下,将短边缩放到384。然后将数据进行顺时针旋转处理,分别生成90度、180度和270度的合成数据。其中,将 ICDAR2019-ArT 和 XFUND 生成的41460张数据按照 9:1 的比例随机划分成了训练集和验证集, ICDAR2015 生成的6000张数据作为`SKL-UGI知识蒸馏策略`实验中的补充数据。 + +处理后的数据集部分数据可视化如下: + +![](../../images/PULC/docs/text_image_orientation_data_demo.png) + +此处提供了经过上述方法处理好的数据,可以直接下载得到。 + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压含文字图像方向场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/text_image_orientation.tar +tar -xf text_image_orientation.tar +cd ../ +``` + +执行上述命令后,`dataset/`下存在`text_image_orientation`目录,该目录中具有以下数据: + +``` +├── img_0 +│ ├── img_rot0_0.jpg +│ ├── img_rot0_1.png +... +├── img_90 +│ ├── img_rot90_0.jpg +│ ├── img_rot90_1.png +... +├── img_180 +│ ├── img_rot180_0.jpg +│ ├── img_rot180_1.png +... +├── img_270 +│ ├── img_rot270_0.jpg +│ ├── img_rot270_1.png +... +├── distill_data +│ ├── gt_7060_0.jpg +│ ├── gt_7060_90.jpg +... +├── train_list.txt +├── train_list.txt.debug +├── train_list_for_distill.txt +├── test_list.txt +├── test_list.txt.debug +└── label_list.txt +``` + +其中`img_0/`、`img_90/`、`img_180/`和`img_270/`分别存放了4个角度的训练集和验证集数据。`train_list.txt`和`test_list.txt`分别为训练集和验证集的标签文件,`train_list.txt.debug`和`test_list.txt.debug`分别为训练集和验证集的`debug`标签文件,其分别是`train_list.txt`和`test_list.txt`的子集,用该文件可以快速体验本案例的流程。`distill_data/`是补充文字数据,该集合和`train`集合的混合数据用于本案例的`SKL-UGI知识蒸馏策略`,对应的训练标签文件为`train_list_for_distill.txt`。关于如何得到蒸馏的标签可以参考[知识蒸馏标签获得](../advanced_tutorials/ssld.md#3.2)。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + +* 关于如何得到蒸馏的标签文件可以参考[知识蒸馏标签获得方法](../advanced_tutorials/ssld.md#3.2)。 + + + +### 3.3 模型训练 + +在`ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml`中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml +``` + +验证集的最佳指标在0.99左右。 + +**备注**:本文档中提到的训练指标均为在大规模内部数据上的训练指标,使用 demo 数据训练时,由于数据集规模较小且分布与大规模内部数据不同,无法达到该指标。可以进一步扩充自己的数据并且使用本案例中介绍的优化方法进行调优,从而达到更高的精度。 + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```bash +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +输出结果如下: + +``` +[{'class_ids': [0, 2], 'scores': [0.85615, 0.05046], 'file_name': 'deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg', 'label_names': ['0', '180']}] +``` + +**备注:** + +- 其中 
`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。
+- 默认是对 `deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。
+- 输出为top2的预测结果,`0` 表示该图文本方向为0度,`90` 表示该图文本方向为顺时针90度,`180` 表示该图文本方向为顺时针180度,`270` 表示该图文本方向为顺时针270度。
+
+
+<a name="4"></a>
+
+## 4. 模型压缩
+
+<a name="4.1"></a>
+
+### 4.1 SKL-UGI 知识蒸馏
+
+SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md)。
+
+<a name="4.1.1"></a>
+
+#### 4.1.1 教师模型训练
+
+复用 `ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml` 中的超参数,训练教师模型,训练脚本如下:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \
+        -o Arch.name=ResNet101_vd
+```
+
+验证集的最佳指标为 0.996 左右,当前教师模型最好的权重保存在 `output/ResNet101_vd/best_model.pdparams`。
+
+**备注:** 训练 ResNet101_vd 模型需要的显存较多,如果机器显存不够,可以将学习率和 batch size 同时缩小一定的倍数进行训练,如在命令后添加以下参数:`-o DataLoader.Train.sampler.batch_size=64`、`-o Optimizer.lr.learning_rate=0.1`。
+
+<a name="4.1.2"></a>
+
+#### 4.1.2 蒸馏训练
+
+配置文件 `ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml` 提供了 `SKL-UGI 知识蒸馏策略` 的配置。该配置将 `ResNet101_vd` 当作教师模型,`PPLCNet_x1_0` 当作学生模型,使用[3.2.2节](#3.2.2)中介绍的蒸馏数据作为新增的无标签数据。训练脚本如下:
+
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+python3 -m paddle.distributed.launch \
+    --gpus="0,1,2,3" \
+    tools/train.py \
+        -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml \
+        -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model
+```
+
+验证集的最佳指标为0.99左右,当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。
+
+
+<a name="5"></a>
+
+## 5. 超参搜索
+
+在 [3.3 节](#3.3)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。
+
+**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。
+
+
+<a name="6"></a>
+
+## 6. 
模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +#### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_text_image_orientation_infer +``` + +执行完该脚本后会在`deploy/models/`下生成`PPLCNet_x1_0_text_image_orientation_infer`文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_text_image_orientation_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在`output/PPLCNet_x1_0/best_model.pdparams`中。 + + + +#### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/text_image_orientation_infer.tar && tar -xf text_image_orientation_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── text_image_orientation_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/text_image_orientation/img_rot0_demo.png` 进行含文字图像方向分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/text_image_orientation/inference_text_image_orientation.yaml +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/text_image_orientation/inference_text_image_orientation.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +img_rot0_demo.jpg: class id(s): [0, 2], score(s): [0.86, 0.05], label_name(s): ['0', '180'] +``` + +其中,输出为top2的预测结果,`0` 表示该图文本方向为0度,`90` 表示该图文本方向为顺时针90度,`180` 表示该图文本方向为顺时针180度,`270` 表示该图文本方向为顺时针270度。 + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/text_image_orientation/inference_text_image_orientation.yaml -o Global.infer_imgs="./images/PULC/text_image_orientation/" +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +img_rot0_demo.jpg: class id(s): [0, 2], score(s): [0.86, 0.05], label_name(s): ['0', '180'] +img_rot180_demo.jpg: class id(s): [2, 1], score(s): [0.88, 0.04], label_name(s): ['180', '90'] +``` + + + +### 6.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 端侧部署 + +Paddle 
Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/PULC/PULC_textline_orientation.md b/docs/zh_CN/PULC/PULC_textline_orientation.md new file mode 100644 index 0000000000000000000000000000000000000000..eea10307532eb0a8a323a82108b0c5f9691a82f8 --- /dev/null +++ b/docs/zh_CN/PULC/PULC_textline_orientation.md @@ -0,0 +1,457 @@ +# PULC 文本行方向分类模型 + +------ + + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 SKL-UGI 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图像](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + + +## 1. 
模型和应用场景介绍 + +该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的文本行方向分类模型。该模型可以广泛应用于如文字矫正、文字识别等场景。 + +下表列出了文本行方向分类模型的相关指标,前两行展现了使用 Res2Net200_vd 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第三行至第七行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + SKL-UGI 知识蒸馏策略训练得到的模型的相关指标。 + + +| 模型 | Top-1 Acc(%) | 延时(ms) | 存储(M) | 策略 | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 93.61 | 89.64 | 111 | 使用 ImageNet 预训练模型 | +| MobileNetV3_small_x0_35 | 81.40 | 2.96 | 2.6 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 89.99 | 2.11 | 7.0 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0* | 94.06 | 2.68 | 7.0 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0* | 94.11 | 2.68 | 7.0 | 使用 SSLD 预训练模型 | +| PPLCNet_x1_0** | 96.01 | 2.72 | 7.0 | 使用 SSLD 预训练模型+EDA 策略| +| PPLCNet_x1_0** | 95.86 | 2.72 | 7.0 | 使用 SSLD 预训练模型+EDA 策略+SKL-UGI 知识蒸馏策略| + +从表中可以看出,backbone 为 SwinTranformer_tiny 时精度较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度可以大幅提升,精度下降也比较明显。将 backbone 替换为 PPLCNet_x1_0 时,精度较 MobileNetV3_small_x0_35 高 8.6 个百分点,速度快10%左右。在此基础上,更改分辨率和stride, 速度变慢 27%,但是精度可以提升 4.5 个百分点(采用[PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)的方案),使用 SSLD 预训练模型后,精度可以继续提升约 0.05 个百分点 ,进一步地,当融合EDA策略后,精度可以再提升 1.9 个百分点。最后,融合SKL-UGI 知识蒸馏策略后,在该场景无效。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。 + +**备注:** + +* 其中不带\*的模型表示分辨率为224x224,带\*的模型表示分辨率为48x192(h\*w),数据增强从网络中的 stride 改为 `[2, [2, 1], [2, 1], [2, 1], [2, 1]]`,其中,外层列表中的每一个元素代表网络结构下采样层的stride,该策略为 [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR) 提供的文本行方向分类器方案。带\*\*的模型表示分辨率为80x160(h\*w), 网络中的 stride 改为 `[2, [2, 1], [2, 1], [2, 1], [2, 1]]`,其中,外层列表中的每一个元素代表网络结构下采样层的stride,此分辨率是经过[超参数搜索策略](PULC_train.md#4-超参搜索)搜索得到的。 +* 延时是基于 Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 测试得到,开启 MKLDNN 加速策略,线程数为10。 +* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。 + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。 + +* 使用命令行快速预测 + +```bash +paddleclas --model_name=textline_orientation --infer_imgs=pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png +``` + +结果如下: +``` +>>> result +class_ids: [0], scores: [1.0], label_names: ['0_degree'], filename: pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png +Predict complete! 
+``` + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。 + +* 在 Python 代码中预测 +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="textline_orientation") +result = model.predict(input_data="pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png") +print(next(result)) +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="textline_orientation", batch_size=2)`, 使用默认的代码返回结果示例如下: + +``` +>>> result +[{'class_ids': [0], 'scores': [1.0], 'label_names': ['0_degree'], 'filename': 'pulc_demo_imgs/textline_orientation/textline_orientation_test_0_0.png'}] +``` + + + +## 3. 模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +本案例中所使用的所有数据集来源于内部数据,如果您希望体验训练过程,可以使用开源数据如[ICDAR2019-LSVT 文本行识别数据](https://aistudio.baidu.com/aistudio/datasetdetail/8429)。 + + + +#### 3.2.2 数据集获取 + +在公开数据集的基础上经过后处理即可得到本案例需要的数据,具体处理方法如下: + +本案例处理了 ICDAR2019-LSVT 文本行识别数据,将其中的 id 号为 0-1999 作为本案例的数据集合,经过旋转处理成 0 类 和 1 类,其中 0 类代表文本行为正,即 0 度,1 类代表文本行为反,即 180 度。 + +- 训练集合,id号为 0-1799 作为训练集合,0 类和 1 类共 3600 张。 + +- 验证集合,id号为 1800-1999 作为验证集合,0 类和 1 类共 400 张。 + +处理后的数据集部分数据可视化如下: + +![](../../images/PULC/docs/textline_orientation_data_demo.png) + +此处提供了经过上述方法处理好的数据,可以直接下载得到。 + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压文本行方向分类场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/textline_orientation.tar +tar -xf textline_orientation.tar +cd ../ +``` + +执行上述命令后,`dataset/` 下存在 `textline_orientation` 目录,该目录中具有以下数据: + +``` +├── 0 +│   ├── img_0.jpg +│   ├── img_1.jpg +... +├── 1 +│   ├── img_0.jpg +│   ├── img_1.jpg +... 
+├── train_list.txt +└── val_list.txt +``` + +其中 `0/` 和 `1/` 分别存放 0 类和 1 类的数据。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt` 的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml` 中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml +``` + + +**备注:** + +* 由于此时使用的数据集并非内部非开源数据集,此处不能直接复现提供的模型的指标,如果希望得到更高的精度,可以根据需要处理[ICDAR2019-LSVT 文本行识别数据](https://aistudio.baidu.com/aistudio/datasetdetail/8429)。 + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [0], 'scores': [1.0], 'file_name': 'deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png', 'label_names': ['0_degree']}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + + + + +## 4. 模型压缩 + + + +### 4.1 SKL-UGI 知识蒸馏 + +SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md)。 + + + +#### 4.1.1 教师模型训练 + +复用 `./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml` 中的超参数,训练教师模型,训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd +``` + +当前教师模型最好的权重保存在 `output/ResNet101_vd/best_model.pdparams`。 + + + +#### 4.1.2 蒸馏训练 + +配置文件`ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型。训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model +``` + +当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。 + + + + +## 5. 超参搜索 + +在 [3.3 节](#3.3)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。 + +**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。 + + + +## 6. 
模型推理部署
+
+<a name="6.1"></a>
+
+### 6.1 推理模型准备
+
+Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。
+
+当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。
+
+<a name="6.1.1"></a>
+
+#### 6.1.1 基于训练得到的权重导出 inference 模型
+
+此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型:
+
+```bash
+python3 tools/export_model.py \
+    -c ./ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml \
+    -o Global.pretrained_model=output/PPLCNet_x1_0/best_model \
+    -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_textline_orientation_infer
+```
+执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_textline_orientation_infer` 文件夹,`models` 文件夹下应有如下文件结构:
+
+```
+├── PPLCNet_x1_0_textline_orientation_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+**备注:** 此处的最佳权重可以根据实际情况来选择,如果希望导出知识蒸馏后的权重,则最佳权重保存在 `output/DistillationModel/best_model_student.pdparams`,在导出命令中将 `-o Global.pretrained_model=xx` 中的字段更改为 `output/DistillationModel/best_model_student` 即可。
+
+<a name="6.1.2"></a>
+
+#### 6.1.2 直接下载 inference 模型
+
+[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。
+
+```
+cd deploy/models
+# 下载 inference 模型并解压
+wget https://paddleclas.bj.bcebos.com/models/PULC/textline_orientation_infer.tar && tar -xf textline_orientation_infer.tar
+```
+
+解压完毕后,`models` 文件夹下应有如下文件结构:
+
+```
+├── textline_orientation_infer
+│   ├── inference.pdiparams
+│   ├── inference.pdiparams.info
+│   └── inference.pdmodel
+```
+
+<a name="6.2"></a>
+
+### 6.2 基于 Python 预测引擎推理
+
+<a name="6.2.1"></a>
+
+#### 6.2.1 预测单张图像
+
+返回 `deploy` 目录:
+
+```
+cd ../
+```
+
+运行下面的命令,对图像 `./images/PULC/textline_orientation/textline_orientation_test_0_0.png` 进行文本行方向分类。
+
+```shell
+# 使用下面的命令使用 GPU 进行预测
+python3.7 python/predict_cls.py -c configs/PULC/textline_orientation/inference_textline_orientation.yaml
+# 使用下面的命令使用 CPU 进行预测
+python3.7 python/predict_cls.py -c configs/PULC/textline_orientation/inference_textline_orientation.yaml -o Global.use_gpu=False
+```
+
+输出结果如下。
+
+```
+textline_orientation_test_0_0.png: class id(s): [0], score(s): [1.00], label_name(s): ['0_degree']
+```
+
+<a name="6.2.2"></a>
+
+#### 6.2.2 基于文件夹的批量预测
+
+如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。
+
+```shell
+# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False
+python3.7 python/predict_cls.py -c configs/PULC/textline_orientation/inference_textline_orientation.yaml -o Global.infer_imgs="./images/PULC/textline_orientation/"
+```
+
+终端中会输出该文件夹内所有图像的分类结果,如下所示。
+
+```
+textline_orientation_test_0_0.png: class id(s): [0], score(s): [1.00], label_name(s): ['0_degree']
+textline_orientation_test_0_1.png: class id(s): [0], score(s): [1.00], label_name(s): ['0_degree']
+textline_orientation_test_1_0.png: class id(s): [1], score(s): [1.00], label_name(s): ['180_degree']
+textline_orientation_test_1_1.png: class id(s): [1], score(s): [1.00], label_name(s): ['180_degree']
+```
+
+其中,`0_degree` 表示该文本行为 0 度,`180_degree` 表示该文本行为 180 度。
+
+<a name="6.3"></a>
+
+### 6.3 基于 C++ 预测引擎推理
+
+PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。
+
+<a name="6.4"></a>
+
+### 6.4 服务化部署
+
+Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。
+
+PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。
+
+<a name="6.5"></a>
+
+### 6.5 端侧部署
+
+Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。
+
+PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。
+
+<a name="6.6"></a>
+
+### 6.6 Paddle2ONNX 模型转换与预测
+
+Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。
+
+PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。
diff --git a/docs/zh_CN/PULC/PULC_traffic_sign.md b/docs/zh_CN/PULC/PULC_traffic_sign.md
new file mode 100644
index 0000000000000000000000000000000000000000..700cbd58b89501ec8b7fe9add5bdceb373a36936
--- /dev/null
+++ b/docs/zh_CN/PULC/PULC_traffic_sign.md
@@ -0,0 +1,485 @@
+# PULC 交通标志分类模型
+
+------
+
+
+## 目录
+
+- [1. 模型和应用场景介绍](#1)
+- [2. 模型快速体验](#2)
+  - [2.1 安装 paddlepaddle](#2.1)
+  - [2.2 安装 paddleclas](#2.2)
+  - [2.3 预测](#2.3)
+- [3. 模型训练、评估和预测](#3)
+  - [3.1 环境配置](#3.1)
+  - [3.2 数据准备](#3.2)
+    - [3.2.1 数据集来源](#3.2.1)
+    - [3.2.2 数据集获取](#3.2.2)
+  - [3.3 模型训练](#3.3)
+  - [3.4 模型评估](#3.4)
+  - [3.5 模型预测](#3.5)
+- [4. 模型压缩](#4)
+  - [4.1 SKL-UGI 知识蒸馏](#4.1)
+    - [4.1.1 教师模型训练](#4.1.1)
+    - [4.1.2 蒸馏训练](#4.1.2)
+- [5. 超参搜索](#5)
+- [6. 模型推理部署](#6)
+  - [6.1 推理模型准备](#6.1)
+    - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1)
+    - [6.1.2 直接下载 inference 模型](#6.1.2)
+  - [6.2 基于 Python 预测引擎推理](#6.2)
+    - [6.2.1 预测单张图像](#6.2.1)
+    - [6.2.2 基于文件夹的批量预测](#6.2.2)
+  - [6.3 基于 C++ 预测引擎推理](#6.3)
+  - [6.4 服务化部署](#6.4)
+  - [6.5 端侧部署](#6.5)
+  - [6.6 Paddle2ONNX 模型转换与预测](#6.6)
+
+
+<a name="1"></a>
+
+## 1. 模型和应用场景介绍
+
+该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的交通标志分类模型。该模型可以广泛应用于自动驾驶、道路监控等场景。
+
+下表列出了不同交通标志分类模型的相关指标,前两行展现了使用 SwinTransformer_tiny 和 MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第三行至第六行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + SKL-UGI 知识蒸馏策略训练得到的模型的相关指标。
+
+
+| 模型 | Top-1 Acc(%) | 延时(ms) | 存储(M) | 策略 |
+|-------|-----------|----------|---------------|---------------|
+| SwinTransformer_tiny | 98.11 | 89.45 | 111 | 使用ImageNet预训练模型 |
+| MobileNetV3_small_x0_35 | 93.88 | 3.01 | 3.9 | 使用ImageNet预训练模型 |
+| PPLCNet_x1_0 | 97.78 | 2.10 | 8.2 | 使用ImageNet预训练模型 |
+| PPLCNet_x1_0 | 97.84 | 2.10 | 8.2 | 使用SSLD预训练模型 |
+| PPLCNet_x1_0 | 98.14 | 2.10 | 8.2 | 使用SSLD预训练模型+EDA策略|
+| PPLCNet_x1_0 | 98.35 | 2.10 | 8.2 | 使用SSLD预训练模型+EDA策略+SKL-UGI知识蒸馏策略|
+
+从表中可以看出,backbone 为 SwinTransformer_tiny 时精度较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度可以大幅提升,但是精度下降明显。将 backbone 替换为 PPLCNet_x1_0 时,精度较 MobileNetV3_small_x0_35 高 3.9 个百分点,同时速度提升 43% 左右。在此基础上,使用 SSLD 预训练模型后,在不改变推理速度的前提下,精度可以提升约 0.06%,进一步地,当融合EDA策略后,精度可以再提升 0.3%,最后,在使用 SKL-UGI 知识蒸馏后,精度可以继续提升 0.21%。此时,PPLCNet_x1_0 的精度超越了 SwinTransformer_tiny,速度快 41 倍。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。
+
+**备注:**
+
+* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。
+
+
+<a name="2"></a>
+
+## 2. 模型快速体验
+
+<a name="2.1"></a>
+
+### 2.1 安装 paddlepaddle
+
+- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装
+
+```bash
+python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
+```
+
+- 您的机器是CPU,请运行以下命令安装
+
+```bash
+python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+```
+
+更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。
+
+<a name="2.2"></a>
+
+### 2.2 安装 paddleclas
+
+使用如下命令快速安装 paddleclas
+
+```
+pip3 install paddleclas
+```
+
+<a name="2.3"></a>
+
+### 2.3 预测
+
+点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。
+
+* 使用命令行快速预测
+
+```bash
+paddleclas --model_name=traffic_sign --infer_imgs=pulc_demo_imgs/traffic_sign/100999_83928.jpg
+```
+
+结果如下:
+```
+>>> result
+class_ids: [182, 179, 162, 128, 24], scores: [0.98623, 0.01255, 0.00022, 0.00021, 0.00012], label_names: ['pl110', 'pl100', 'pl120', 'p26', 'pm10'], filename: pulc_demo_imgs/traffic_sign/100999_83928.jpg
+Predict complete!
+```
+
+**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。
+
+
+* 在 Python 代码中预测
+```python
+import paddleclas
+model = paddleclas.PaddleClas(model_name="traffic_sign")
+result = model.predict(input_data="pulc_demo_imgs/traffic_sign/100999_83928.jpg")
+print(next(result))
+```
+
+**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="traffic_sign", batch_size=2)`, 使用默认的代码返回结果示例如下:
+
+```
+>>> result
+[{'class_ids': [182, 179, 162, 128, 24], 'scores': [0.98623, 0.01255, 0.00022, 0.00021, 0.00012], 'label_names': ['pl110', 'pl100', 'pl120', 'p26', 'pm10'], 'filename': 'pulc_demo_imgs/traffic_sign/100999_83928.jpg'}]
+```
+
+<a name="3"></a>
+
+## 3. 模型训练、评估和预测
+
+<a name="3.1"></a>
+
+### 3.1 环境配置
+
+* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。
+
+<a name="3.2"></a>
+
+### 3.2 数据准备
+
+<a name="3.2.1"></a>
+
+#### 3.2.1 数据集来源
+
+本案例中所使用的数据为[Tsinghua-Tencent 100K dataset (CC-BY-NC license)](https://cg.cs.tsinghua.edu.cn/traffic-sign/),在使用的过程中,对交通标志检测框进行随机扩充与裁剪,从而得到用于训练与测试的图像,下面简称该数据集为`TT100K`数据集。
+
+<a name="3.2.2"></a>
+
+#### 3.2.2 数据集获取
+
+在 TT100K 数据集上,对交通标志检测框进行随机扩充与裁剪,从而得到用于训练与测试的图像。随机扩充检测框的逻辑如下所示。
+
+```python
+import random
+
+def get_random_crop_box(xmin, ymin, xmax, ymax, img_height, img_width, ratio=1.0):
+    # 检测框的高与宽
+    h = ymax - ymin
+    w = xmax - xmin
+
+    # 四个方向上随机外扩的距离:每个方向最多外扩 ratio 倍的框宽/框高,
+    # 同时保证扩充后的框不超出图像边界
+    xmin_diff = random.random() * ratio * min(w, xmin/ratio)
+    ymin_diff = random.random() * ratio * min(h, ymin/ratio)
+    xmax_diff = random.random() * ratio * min(w, (img_width-xmax-1)/ratio)
+    ymax_diff = random.random() * ratio * min(h, (img_height-ymax-1)/ratio)
+
+    new_xmin = round(xmin - xmin_diff)
+    new_ymin = round(ymin - ymin_diff)
+    new_xmax = round(xmax + xmax_diff)
+    new_ymax = round(ymax + ymax_diff)
+
+    return new_xmin, new_ymin, new_xmax, new_ymax
+```
+
+完整的预处理逻辑,可以参考下载好的数据集文件夹中的 `deal.py` 文件。
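+
+下面给出一段示意性的调用代码,演示如何使用上述函数对一个标注框做随机扩充并据此裁剪图像(其中图像路径与框坐标均为假设的示例值,实际请以数据集标注为准):
+
+```python
+from PIL import Image
+
+# 假设 get_random_crop_box 已按上文定义
+img = Image.open("demo.jpg")  # 示例图像
+xmin, ymin, xmax, ymax = 100, 120, 180, 200  # 示例标注框
+new_box = get_random_crop_box(xmin, ymin, xmax, ymax,
+                              img_height=img.height, img_width=img.width)
+# crop 接收 (left, upper, right, lower),正好对应扩充后的检测框
+img.crop(new_box).save("demo_crop.jpg")
+```
+
+
+处理后的数据集部分数据可视化如下。
+
+<div align="center">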
+
+处理后的数据集部分数据可视化如下。
+
+(图:随机扩充与裁剪后的交通标志样例图片)
+ + +此处提供了经过上述方法处理好的数据,可以直接下载得到。 + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压交通标志分类场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/traffic_sign.tar +tar -xf traffic_sign.tar +cd ../ +``` + +执行上述命令后,`dataset/`下存在`traffic_sign`目录,该目录中具有以下数据: + +``` +traffic_sign +├── train +│ ├── 0_62627.jpg +│ ├── 100000_89031.jpg +│ ├── 100001_89031.jpg +... +├── test +│ ├── 100423_2315.jpg +│ ├── 100424_2315.jpg +│ ├── 100425_2315.jpg +... +├── other +│ ├── 100603_3422.jpg +│ ├── 100604_3422.jpg +... +├── label_list_train.txt +├── label_list_test.txt +├── label_list_other.txt +├── label_list_train_for_distillation.txt +├── label_list_train.txt.debug +├── label_list_test.txt.debug +├── label_name_id.txt +├── deal.py +``` + +其中`train/`和`test/`分别为训练集和验证集。`label_list_train.txt`和`label_list_test.txt`分别为训练集和验证集的标签文件,`label_list_train.txt.debug`和`label_list_test.txt.debug`分别为训练集和验证集的`debug`标签文件,其分别是`label_list_train.txt`和`label_list_test.txt`的子集,用该文件可以快速体验本案例的流程。`train`与`other`的混合数据用于本案例的`SKL-UGI知识蒸馏策略`,对应的训练标签文件为`label_list_train_for_distillation.txt`。 + + +**备注:** + +* 关于 `label_list_train.txt`、`label_list_test.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + +* 关于如何得到蒸馏的标签文件可以参考[知识蒸馏标签获得方法](../advanced_tutorials/ssld.md)。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml` 中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml +``` + +验证集的最佳指标在 `98.14%` 左右(数据集较小,一般有0.1%左右的波动)。 + + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```bash +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model +``` + +输出结果如下: + +``` +99603_17806.jpg: class id(s): [216, 145, 49, 207, 169], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['pm20', 'pm30', 'pm40', 'pl25', 'pm15'] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `deploy/images/PULC/traffic_sign/99603_17806.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + + + +## 4. 
模型压缩 + + + +### 4.1 SKL-UGI 知识蒸馏 + +SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md#3.2)。 + + + +#### 4.1.1 教师模型训练 + +复用 `ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml` 中的超参数,训练教师模型,训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd +``` + +验证集的最佳指标为 `98.59%` 左右,当前教师模型最好的权重保存在 `output/ResNet101_vd/best_model.pdparams`。 + + + +#### 4.1.2 蒸馏训练 + +配置文件`ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型,使用ImageNet数据集的验证集作为新增的无标签数据。训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model +``` + +验证集的最佳指标为 `98.35%` 左右,当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。 + + + + +## 5. 超参搜索 + +在 [3.2 节](#3.2)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。 + +**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。 + + + +## 6. 模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_traffic_sign_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_traffic_sign_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_traffic_sign_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在`output/PPLCNet_x1_0/best_model.pdparams`中。 + + + +### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/traffic_sign_infer.tar && tar -xf traffic_sign_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── traffic_sign_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/traffic_sign/99603_17806.jpg` 进行交通标志分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/traffic_sign/inference_traffic_sign.yaml +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/traffic_sign/inference_traffic_sign.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +99603_17806.jpg: class id(s): [216, 145, 49, 207, 169], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['pm20', 'pm30', 'pm40', 'pl25', 'pm15'] +``` + + + +#### 6.2.2 基于文件夹的批量预测 + 
+如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/traffic_sign/inference_traffic_sign.yaml -o Global.infer_imgs="./images/PULC/traffic_sign/" +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +100999_83928.jpg: class id(s): [182, 179, 162, 128, 24], score(s): [0.99, 0.01, 0.00, 0.00, 0.00], label_name(s): ['pl110', 'pl100', 'pl120', 'p26', 'pm10'] +99603_17806.jpg: class id(s): [216, 145, 49, 24, 169], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['pm20', 'pm30', 'pm40', 'pm10', 'pm15'] +``` + +输出的 `label_name`可以从`dataset/traffic_sign/report.pdf`文件中查阅对应的图片。 + + + +### 6.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/PULC/PULC_train.md b/docs/zh_CN/PULC/PULC_train.md new file mode 100644 index 0000000000000000000000000000000000000000..035535c7f9eb04af952c628fca85cedaaffc97b8 --- /dev/null +++ b/docs/zh_CN/PULC/PULC_train.md @@ -0,0 +1,241 @@ +## 超轻量图像分类方案PULC +------ + + +## 目录 + +- [1. PULC方案简介](#1) +- [2. 数据准备](#2) + - [2.1 数据集格式说明](#2.1) + - [2.2 标注文件生成](#2.2) +- [3. 使用标准分类配置进行训练](#3) + - [3.1 骨干网络PP-LCNet](#3.1) + - [3.2 SSLD预训练权重](#3.2) + - [3.3 EDA数据增强策略](#3.3) + - [3.4 SKL-UGI模型蒸馏](#3.4) + - [3.5 总结](#3.5) +- [4. 超参搜索](#4) + - [4.1 基于默认配置搜索](#4.1) + - [4.2 自定义搜索配置](#4.2) + + + +### 1. PULC方案简介 + +图像分类是计算机视觉的基础算法之一,是企业应用中最常见的算法,也是许多 CV 应用的重要组成部分。近年来,骨干网络模型发展迅速,ImageNet 的精度纪录被不断刷新。然而,这些模型在实用场景的表现有时却不尽如人意。一方面,精度高的模型往往体积大,运算慢,常常难以满足实际部署需求;另一方面,选择了合适的模型之后,往往还需要经验丰富的工程师进行调参,费时费力。PaddleClas 为了解决企业应用难题,让分类模型的训练和调参更加容易,总结推出了实用轻量图像分类解决方案(PULC, Practical Ultra Lightweight Classification)。PULC融合了骨干网络、数据增广、蒸馏等多种前沿算法,可以自动训练得到轻量且高精度的图像分类模型。 + +PULC 方案在人、车、OCR等方向的多个场景中均验证有效,用超轻量模型就可实现与 SwinTransformer 模型接近的精度,预测速度提高 40+ 倍。 + +
+(图:PULC 方案效果展示,超轻量模型精度比肩 SwinTransformer,预测速度提高 40+ 倍)
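+
+在展开方案细节之前,下面给出一段示意代码,展示 PULC 产出的模型如何通过 paddleclas whl 包快速调用(以有人/无人分类场景为例,`model_name="person_exists"` 参照该场景文档中的模型名,图片路径为假设值):
+
+```python
+import paddleclas
+
+# 加载 PULC 有人/无人分类模型,首次运行会自动下载对应的 inference 模型
+model = paddleclas.PaddleClas(model_name="person_exists")
+
+# predict() 返回 generator,需用 next() 或 for 循环取出预测结果
+result = model.predict(input_data="test.jpg")  # test.jpg 为假设的本地图片路径
+print(next(result))
+```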
+ +方案主要包括 4 部分,分别是:PP-LCNet轻量级骨干网络、SSLD预训练权重、数据增强策略集成(EDA)和 SKL-UGI 知识蒸馏算法。此外,我们还采用了超参搜索的方法,高效优化训练中的超参数。下面,我们以有人/无人场景为例,对方案进行说明。 + +**备注**:针对一些特定场景,我们提供了基础的训练文档供参考,例如[有人/无人分类模型](PULC_person_exists.md)等,您可以在[这里](./PULC_model_list.md)找到这些文档。如果这些文档中的方法不能满足您的需求,或者您需要自定义训练任务,您可以参考本文档。 + + + +### 2. 数据准备 + + + +#### 2.1 数据集格式说明 + +PaddleClas 使用 `txt` 格式文件指定训练集和测试集,以有人/无人场景为例,其中需要指定 `train_list.txt` 和 `val_list.txt` 当作训练集和验证集的数据标签,格式形如: + +``` +# 每一行采用"空格"分隔图像路径与标注 +train/1.jpg 0 +train/10.jpg 1 +... +``` + +如果您想获取更多常用分类数据集的信息,可以参考文档可以参考 [PaddleClas 分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + +#### 2.2 标注文件生成 + +如果您已经有实际场景中的数据,那么按照上节的格式进行标注即可。这里,我们提供了一个快速生成数据的脚本,您只需要将不同类别的数据分别放在文件夹中,运行脚本即可生成标注文件。 + +首先,假设您存放数据的路径为`./train`,`train/` 中包含了每个类别的数据,类别号从 0 开始,每个类别的文件夹中有具体的图像数据。 + +```shell +train +├── 0 +│   ├── 0.jpg +│   ├── 1.jpg +│   └── ... +└── 1 + ├── 0.jpg + ├── 1.jpg + └── ... +└── ... +``` + +```shell +tree -r -i -f train | grep -E "jpg|JPG|jpeg|JPEG|png|PNG" | awk -F "/" '{print $0" "$2}' > train_list.txt +``` + +其中,如果涉及更多的图片名称尾缀,可以增加 `grep -E`后的内容, `$2` 中的 `2` 为类别号文件夹的层级。 + +**备注:** 以上为数据集获取和生成的方法介绍,这里您可以直接下载有人/无人场景数据快速开始体验。 + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,下载并解压有人/无人场景的数据。 + +```shell +cd dataset +wget https://paddleclas.bj.bcebos.com/data/PULC/person_exists.tar +tar -xf person_exists.tar +cd ../ +``` + + + +### 3. 使用标准分类配置进行训练 + + + +#### 3.1 骨干网络PP-LCNet + +PULC 采用了轻量骨干网络 PP-LCNet,相比同精度竞品速度快 50%,您可以在[PP-LCNet介绍](../models/PP-LCNet.md)查阅该骨干网络的详细介绍。 +直接使用 PP-LCNet 训练的命令为: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml +``` + +为了方便性能对比,我们也提供了大模型 SwinTransformer_tiny 和轻量模型 MobileNetV3_small_x0_35 的配置文件,您可以使用命令训练: + +SwinTransformer_tiny: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml +``` + +MobileNetV3_small_x0_35: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml +``` + +训练得到的模型精度对比如下表。 + +| 模型 | Tpr(%) | 延时(ms) | 存储(M) | 策略 | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 95.69 | 95.30 | 107 | 使用 ImageNet 预训练模型 | +| MobileNetV3_small_x0_35 | 68.25 | 2.85 | 1.6 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 89.57 | 2.12 | 6.5 | 使用 ImageNet 预训练模型 | + +从中可以看出,PP-LCNet 的速度比 SwinTransformer 快很多,但是精度也略低。下面我们通过一系列优化来提高 PP-LCNet 模型的精度。 + + + +#### 3.2 SSLD预训练权重 + +SSLD 是百度自研的半监督蒸馏算法,在 ImageNet 数据集上,模型精度可以提升 3-7 个点,您可以在 [SSLD 介绍](../advanced_tutorials/ssld.md)找到详细介绍。我们发现,使用SSLD预训练权重,可以有效提升应用分类模型的精度。此外,在训练中使用更小的分辨率,可以有效提升模型精度。同时,我们也对学习率进行了优化。 +基于以上三点改进,我们训练得到模型精度为 92.1%,提升 2.6%。 + + + +#### 3.3 EDA数据增强策略 + +数据增强是视觉算法中常用的优化策略,可以对模型精度有明显提升。除了传统的 RandomCrop,RandomFlip 等方法之外,我们还应用了 RandomAugment 和 RandomErasing。您可以在[数据增强介绍](../advanced_tutorials/DataAugmentation.md)找到详细介绍。 +由于这两种数据增强对图片的修改较大,使分类任务变难,在一些小数据集上可能会导致模型欠拟合,我们将提前设置好这两种方法启用的概率。 +基于以上改进,我们训练得到模型精度为 93.43%,提升 1.3%。 + + + +#### 3.4 SKL-UGI模型蒸馏 + +模型蒸馏是一种可以有效提升小模型精度的方法,您可以在[知识蒸馏介绍](../advanced_tutorials/ssld.md)找到详细介绍。我们选择 ResNet101_vd 作为教师模型进行蒸馏。为了适应蒸馏过程,我们在此也对网络不同 stage 的学习率进行了调整。基于以上改进,我们训练得到模型精度为 95.6%,提升 1.4%。 + + + +#### 3.5 总结 + +经过以上方法优化,PP-LCNet最终精度达到 
95.6%,达到了大模型的精度水平。我们将实验结果总结如下表: + +| 模型 | Tpr(%) | 延时(ms) | 存储(M) | 策略 | +|-------|-----------|----------|---------------|---------------| +| SwinTranformer_tiny | 95.69 | 95.30 | 107 | 使用 ImageNet 预训练模型 | +| MobileNetV3_small_x0_35 | 68.25 | 2.85 | 1.6 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 89.57 | 2.12 | 6.5 | 使用 ImageNet 预训练模型 | +| PPLCNet_x1_0 | 92.10 | 2.12 | 6.5 | 使用 SSLD 预训练模型 | +| PPLCNet_x1_0 | 93.43 | 2.12 | 6.5 | 使用 SSLD 预训练模型+EDA 策略| +| PPLCNet_x1_0 | 95.60 | 2.12 | 6.5 | 使用 SSLD 预训练模型+EDA 策略+SKL-UGI 知识蒸馏策略| + +我们在其他 8 个场景中也使用了同样的优化策略,得到如下结果: + +| 场景 | 大模型 | 大模型精度(%) | 小模型 | 小模型精度(%) | +|----------|----------|----------|----------|----------| +| 人体属性识别 | Res2Net200_vd | 81.25 | PPLCNet_x1_0 | 78.59 | +| 佩戴安全帽分类 | Res2Net200_vd| 98.92 | PPLCNet_x1_0 |99.38 | +| 交通标志分类 | SwinTransformer_tiny | 98.11 | PPLCNet_x1_0 | 98.35 | +| 车辆属性识别 | Res2Net200_vd_26w_4s | 91.36 | PPLCNet_x1_0 | 90.81 | +| 有车/无车分类 | SwinTransformer_tiny | 97.71 | PPLCNet_x1_0 | 95.92 | +| 含文字图像方向分类 | SwinTransformer_tiny |99.12 | PPLCNet_x1_0 | 99.06 | +| 文本行方向分类 | SwinTransformer_tiny | 93.61 | PPLCNet_x1_0 | 96.01 | +| 语种分类 | SwinTransformer_tiny | 98.12 | PPLCNet_x1_0 | 99.26 | + + +从结果可以看出,PULC 方案在多个应用场景中均可提升模型精度。使用 PULC 方案可以大大减少模型优化的工作量,快速得到精度较高的模型。 + + + +### 4. 超参搜索 + +在上述训练过程中,我们调节了学习率、数据增广方法开启概率、分阶段学习率倍数等参数。 +这些参数在不同场景中最优值可能并不相同。我们提供了一个快速超参搜索的脚本,将超参调优的过程自动化。 +这个脚本会遍历搜索值列表中的参数来替代默认配置中的参数,依次训练,最终选择精度最高的模型所对应的参数作为搜索结果。 + + + +#### 4.1 基于默认配置搜索 + +配置文件 [search.yaml](../../../ppcls/configs/PULC/person_exists/search.yaml) 定义了有人/无人场景超参搜索的配置,使用如下命令即可完成超参数的搜索。 + +```bash +python3 tools/search_strategy.py -c ppcls/configs/PULC/person_exists/search.yaml +``` + +**备注**:关于搜索部分,我们也在不断优化,敬请期待。 + + + +#### 4.2 自定义搜索配置 + +您也可以根据训练结果或调参经验,修改超参搜索的配置。 + +修改 `lrs` 中的`search_values`字段,可以修改学习率搜索值列表; + +修改 `resolutions` 中的 `search_values` 字段,可以修改分辨率的搜索值列表; + +修改 `ra_probs` 中的 `search_values` 字段,可以修改 RandAugment 开启概率的搜索值列表; + +修改 `re_probs` 中的 `search_values` 字段,可以修改 RnadomErasing 开启概率的搜索值列表; + +修改 `lr_mult_list` 中的 `search_values` 字段,可以修改 lr_mult 搜索值列表; + +修改 `teacher` 中的 `search_values` 字段,可以修改教师模型的搜索列表。 + +搜索完成后,会在 `output/search_person_exists` 中生成最终的结果,其中,除`search_res`外 `output/search_person_exists` 中目录为对应的每个搜索的超参数的结果的权重和训练日志文件,`search_res` 对应的是蒸馏后的结果,也就是最终的模型,该模型的权重保存在`output/output_dir/search_person_exists/DistillationModel/best_model_student.pdparams`。 diff --git a/docs/zh_CN/PULC/PULC_vehicle_attribute.md b/docs/zh_CN/PULC/PULC_vehicle_attribute.md new file mode 100644 index 0000000000000000000000000000000000000000..03f67321fd04e1e33be0f7829da8bfce1c2be0a8 --- /dev/null +++ b/docs/zh_CN/PULC/PULC_vehicle_attribute.md @@ -0,0 +1,477 @@ +# PULC 车辆属性识别模型 + +------ + + +## 目录 + +- [1. 模型和应用场景介绍](#1) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.2.1 数据集来源](#3.2.1) + - [3.2.2 数据集获取](#3.2.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型压缩](#4) + - [4.1 SKL-UGI 知识蒸馏](#4.1) + - [4.1.1 教师模型训练](#4.1.1) + - [4.1.2 蒸馏训练](#4.1.2) +- [5. 超参搜索](#5) +- [6. 模型推理部署](#6) + - [6.1 推理模型准备](#6.1) + - [6.1.1 基于训练得到的权重导出 inference 模型](#6.1.1) + - [6.1.2 直接下载 inference 模型](#6.1.2) + - [6.2 基于 Python 预测引擎推理](#6.2) + - [6.2.1 预测单张图像](#6.2.1) + - [6.2.2 基于文件夹的批量预测](#6.2.2) + - [6.3 基于 C++ 预测引擎推理](#6.3) + - [6.4 服务化部署](#6.4) + - [6.5 端侧部署](#6.5) + - [6.6 Paddle2ONNX 模型转换与预测](#6.6) + + + + +## 1. 
模型和应用场景介绍 + +该案例提供了用户使用 PaddleClas 的超轻量图像分类方案(PULC,Practical Ultra Lightweight image Classification)快速构建轻量级、高精度、可落地的车辆属性识别模型。该模型可以广泛应用于车辆识别、道路监控等场景。 + +下表列出了不同车辆属性识别模型的相关指标,前三行展现了使用 Res2Net200_vd_26w_4s、 ResNet50、MobileNetV3_small_x0_35 作为 backbone 训练得到的模型的相关指标,第四行至第七行依次展现了替换 backbone 为 PPLCNet_x1_0、使用 SSLD 预训练模型、使用 SSLD 预训练模型 + EDA 策略、使用 SSLD 预训练模型 + EDA 策略 + SKL-UGI 知识蒸馏策略训练得到的模型的相关指标。 + + +| 模型 | mA(%) | 延时(ms) | 存储(M) | 策略 | +|-------|-----------|----------|---------------|---------------| +| Res2Net200_vd_26w_4s | 91.36 | 79.46 | 293 | 使用ImageNet预训练模型 | +| ResNet50 | 89.98 | 12.83 | 92 | 使用ImageNet预训练模型 | +| MobileNetV3_small_x0_35 | 87.41 | 2.91 | 2.8 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 89.57 | 2.36 | 7.2 | 使用ImageNet预训练模型 | +| PPLCNet_x1_0 | 90.07 | 2.36 | 7.2 | 使用SSLD预训练模型 | +| PPLCNet_x1_0 | 90.59 | 2.36 | 7.2 | 使用SSLD预训练模型+EDA策略| +| PPLCNet_x1_0 | 90.81 | 2.36 | 7.2 | 使用SSLD预训练模型+EDA策略+SKL-UGI知识蒸馏策略| + +从表中可以看出,backbone 为 Res2Net200_vd_26w_4s 时精度较高,但是推理速度较慢。将 backbone 替换为轻量级模型 MobileNetV3_small_x0_35 后,速度可以大幅提升,但是精度下降明显。将 backbone 替换为 PPLCNet_x1_0 时,精度提升 2 个百分点,同时速度也提升 23% 左右。在此基础上,使用 SSLD 预训练模型后,在不改变推理速度的前提下,精度可以提升约 0.5 个百分点,进一步地,当融合EDA策略后,精度可以再提升 0.52 个百分点,最后,在使用 SKL-UGI 知识蒸馏后,精度可以继续提升 0.23 个百分点。此时,PPLCNet_x1_0 的精度与 Res2Net200_vd_26w_4s 仅相差 0.55 个百分点,但是速度快 32 倍。关于 PULC 的训练方法和推理部署方法将在下面详细介绍。 + +**备注:** + +* 延时是基于 Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz 测试得到,开启 MKLDNN 加速策略,线程数为10。 +* 关于PP-LCNet的介绍可以参考[PP-LCNet介绍](../models/PP-LCNet.md),相关论文可以查阅[PP-LCNet paper](https://arxiv.org/abs/2109.15099)。 + + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +点击[这里](https://paddleclas.bj.bcebos.com/data/PULC/pulc_demo_imgs.zip)下载 demo 数据并解压,然后在终端中切换到相应目录。 + +* 使用命令行快速预测 + +```bash +paddleclas --model_name=vehicle_attribute --infer_imgs=pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg +``` + +结果如下: +``` +>>> result +attributes: Color: (yellow, prob: 0.9893476963043213), Type: (hatchback, prob: 0.9734097719192505), output: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], filename: pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg +Predict complete! +``` + +**备注**: 更换其他预测的数据时,只需要改变 `--infer_imgs=xx` 中的字段即可,支持传入整个文件夹。 + + +* 在 Python 代码中预测 +```python +import paddleclas +model = paddleclas.PaddleClas(model_name="vehicle_attribute") +result = model.predict(input_data="pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg") +print(next(result)) +``` + +**备注**:`model.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果, 默认 `batch_size` 为 1,如果需要更改 `batch_size`,实例化模型时,需要指定 `batch_size`,如 `model = paddleclas.PaddleClas(model_name="vehicle_attribute", batch_size=2)`, 使用默认的代码返回结果示例如下: + +``` +>>> result +[{'attributes': 'Color: (yellow, prob: 0.9893476963043213), Type: (hatchback, prob: 0.9734097719192505)', 'output': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 'filename': 'pulc_demo_imgs/vehicle_attribute/0002_c002_00030670_0.jpg'}] +``` + + + + +## 3. 
模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + + + +#### 3.2.1 数据集来源 + +本案例中所使用的数据为[VeRi 数据集](https://www.v7labs.com/open-datasets/veri-dataset)。 + + + +#### 3.2.2 数据集获取 + +部分数据可视化如下所示。 + +
+(图:VeRi 数据集部分样例图片)
+ +首先从[VeRi数据集官网](https://www.v7labs.com/open-datasets/veri-dataset)中申请并下载数据,放在PaddleClas的`dataset`目录下,数据集目录名为`VeRi`,使用下面的命令进入该文件夹。 + +```shell +cd PaddleClas/dataset/VeRi/ +``` + +然后使用下面的代码转换label(可以在python终端中执行下面的命令,也可以将其写入一个文件,然后使用`python3 convert.py`的方式运行该文件)。 + + +```python +import os +from xml.dom.minidom import parse + +vehicleids = [] + +def convert_annotation(input_fp, output_fp, subdir): + in_file = open(input_fp) + list_file = open(output_fp, 'w') + tree = parse(in_file) + + root = tree.documentElement + + for item in root.getElementsByTagName("Item"): + label = ['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0'] + if item.hasAttribute("imageName"): + name = item.getAttribute("imageName") + if item.hasAttribute("vehicleID"): + vehicleid = item.getAttribute("vehicleID") + if vehicleid not in vehicleids : + vehicleids.append(vehicleid) + vid = vehicleids.index(vehicleid) + if item.hasAttribute("colorID"): + colorid = int (item.getAttribute("colorID")) + label[colorid-1] = '1' + if item.hasAttribute("typeID"): + typeid = int (item.getAttribute("typeID")) + label[typeid+9] = '1' + label = ','.join(label) + list_file.write(os.path.join(subdir, name) + "\t" + label + "\n") + + list_file.close() + +convert_annotation('train_label.xml', 'train_list.txt', 'image_train') #imagename vehiclenum colorid typeid +convert_annotation('test_label.xml', 'test_list.txt', 'image_test') +``` + +执行上述命令后,`VeRi`目录中具有以下数据: + +``` +VeRi +├── image_train +│ ├── 0001_c001_00016450_0.jpg +│ ├── 0001_c001_00016460_0.jpg +│ ├── 0001_c001_00016470_0.jpg +... +├── image_test +│ ├── 0002_c002_00030600_0.jpg +│ ├── 0002_c002_00030605_1.jpg +│ ├── 0002_c002_00030615_1.jpg +... +... +├── train_list.txt +├── test_list.txt +├── train_label.xml +├── test_label.xml +``` + +其中`train/`和`test/`分别为训练集和验证集。`train_list.txt`和`test_list.txt`分别为训练集和验证集的转换后用于训练的标签文件。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml` 中提供了基于该场景的训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml +``` + +验证集的最佳指标在 `90.59%` 左右(数据集较小,一般有0.3%左右的波动)。 + + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model="output/PPLCNet_x1_0/best_model" +``` + +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```bash +python3 tools/infer.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model +``` + +输出结果如下: + +``` +[{'attr': 'Color: (yellow, prob: 0.9893478155136108), Type: (hatchback, prob: 0.9734100103378296)', 'pred': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 'file_name': './deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg'}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `./deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + + + +## 4. 
模型压缩 + + + +### 4.1 SKL-UGI 知识蒸馏 + +SKL-UGI 知识蒸馏是 PaddleClas 提出的一种简单有效的知识蒸馏方法,关于该方法的介绍,可以参考[SKL-UGI 知识蒸馏](../advanced_tutorials/ssld.md)。 + + + +#### 4.1.1 教师模型训练 + +复用 `ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml` 中的超参数,训练教师模型,训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Arch.name=ResNet101_vd +``` + +验证集的最佳指标为 `91.60%` 左右,当前教师模型最好的权重保存在 `output/ResNet101_vd/best_model.pdparams`。 + + + +#### 4.1.2 蒸馏训练 + +配置文件`ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml`提供了`SKL-UGI知识蒸馏策略`的配置。该配置将`ResNet101_vd`当作教师模型,`PPLCNet_x1_0`当作学生模型。训练脚本如下: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml \ + -o Arch.models.0.Teacher.pretrained=output/ResNet101_vd/best_model +``` + +验证集的最佳指标为 `90.81%` 左右,当前模型最好的权重保存在 `output/DistillationModel/best_model_student.pdparams`。 + + + + +## 5. 超参搜索 + +在 [3.3 节](#3.3)和 [4.1 节](#4.1)所使用的超参数是根据 PaddleClas 提供的 `超参数搜索策略` 搜索得到的,如果希望在自己的数据集上得到更好的结果,可以参考[超参数搜索策略](PULC_train.md#4-超参搜索)来获得更好的训练超参数。 + +**备注:** 此部分内容是可选内容,搜索过程需要较长的时间,您可以根据自己的硬件情况来选择执行。如果没有更换数据集,可以忽略此节内容。 + + + +## 6. 模型推理部署 + + + +### 6.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + +### 6.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/DistillationModel/best_model_student \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_vehicle_attribute_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_vehicle_attributeibute_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +└── PPLCNet_x1_0_vehicle_attribute_infer + ├── inference.pdiparams + ├── inference.pdiparams.info + └── inference.pdmodel +``` + +**备注:** 此处的最佳权重是经过知识蒸馏后的权重路径,如果没有执行知识蒸馏的步骤,最佳模型保存在`output/PPLCNet_x1_0/best_model.pdparams`中。 + + + +### 6.1.2 直接下载 inference 模型 + +[6.1.1 小节](#6.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddleclas.bj.bcebos.com/models/PULC/vehicle_attribute_infer.tar && tar -xf vehicle_attribute_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── vehicle_attribute_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 6.2 基于 Python 预测引擎推理 + + + + +#### 6.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg` 进行车辆属性识别。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml -o Global.use_gpu=True +# 使用下面的命令使用 CPU 进行预测 +python3.7 python/predict_cls.py -c configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +0002_c002_00030670_0.jpg: {'attributes': 'Color: (yellow, prob: 0.9893478155136108), Type: (hatchback, prob: 
0.9734099507331848)', 'output': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]} +``` + + + +#### 6.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3.7 python/predict_cls.py -c configs/PULC/vehicle_attribute/inference_vehicle_attribute.yaml -o Global.infer_imgs="./images/PULC/vehicle_attribute/" +``` + +终端中会输出该文件夹内所有图像的属性识别结果,如下所示。 + +``` +0002_c002_00030670_0.jpg: {'attributes': 'Color: (yellow, prob: 0.9893476963043213), Type: (hatchback, prob: 0.9734097719192505)', 'output': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]} +0014_c012_00040750_0.jpg: {'attributes': 'Color: (red, prob: 0.999872088432312), Type: (sedan, prob: 0.999976634979248)', 'output': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]} +``` + + + +### 6.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 6.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 6.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 6.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/advanced_tutorials/DataAugmentation.md b/docs/zh_CN/advanced_tutorials/DataAugmentation.md index 9e5159a4a148b75fa28d1cd774a8e8498e6da460..7097ff637b9f204f19d596445b2d0376e7b52d3b 100644 --- a/docs/zh_CN/advanced_tutorials/DataAugmentation.md +++ b/docs/zh_CN/advanced_tutorials/DataAugmentation.md @@ -1,33 +1,149 @@ # 数据增强分类实战 --- -本节将基于 ImageNet-1K 的数据集详细介绍数据增强实验,如果想快速体验此方法,可以参考 [**30 分钟玩转 PaddleClas(进阶版)**](../quick_start/quick_start_classification_professional.md)中基于 CIFAR100 的数据增强实验。如果想了解相关算法的内容,请参考[数据增强算法介绍](../algorithm_introduction/DataAugmentation.md)。 - - ## 目录 -- [1. 参数配置](#1) - - [1.1 AutoAugment](#1.1) - - [1.2 RandAugment](#1.2) - - [1.3 TimmAutoAugment](#1.3) - - [1.4 Cutout](#1.4) - - [1.5 RandomErasing](#1.5) - - [1.6 HideAndSeek](#1.6) - - [1.7 GridMask](#1.7) - - [1.8 Mixup](#1.8) - - [1.9 Cutmix](#1.9) - - [1.10 Mixup 与 Cutmix 同时使用](#1.10) -- [2. 启动命令](#2) -- [3. 注意事项](#3) -- [4. 实验结果](#4) +- [1. 
算法介绍](#1) + - [1.1 数据增强简介](#1.1) + - [1.2 图像变换类数据增强](#1.2) + - [1.2.1 AutoAugment](#1.2.1) + - [1.2.1.1 AutoAugment 算法介绍](#1.2.1.1) + - [1.2.1.2 AutoAugment 配置](#1.2.1.2) + - [1.2.2 RandAugment](#1.2.2) + - [1.2.2.1 RandAugment 算法介绍](#1.2.2.1) + - [1.2.2.2 RandAugment 配置](#1.2.2.2) + - [1.2.3 TimmAutoAugment](#1.2.3) + - [1.2.3.1 TimmAutoAugment 算法介绍](#1.2.3.1) + - [1.2.3.2 TimmAutoAugment 配置](#1.2.3.2) + - [1.3 图像裁剪类数据增强](#1.3) + - [1.3.1 Cutout](#1.3.1) + - [1.3.1.1 Cutout 算法介绍](#1.3.1.1) + - [1.3.1.2 Cutout 配置](#1.3.1.2) + - [1.3.2 RandomErasing](#1.3.2) + - [1.3.2.1 RandomErasing 算法介绍](#1.3.2.1) + - [1.3.2.2 RandomErasing 配置](#1.3.2.2) + - [1.3.3 HideAndSeek](#1.3.3) + - [1.3.3.1 HideAndSeek 算法介绍](#1.3.3.1) + - [1.3.3.2 HideAndSeek 配置](#1.3.3.2) + - [1.3.4 GridMask](#1.3.4) + - [1.3.4.1 GridMask 算法介绍](#1.3.4.1) + - [1.3.4.2 GridMask 配置](#1.3.4.2) + - [1.4 图像混叠类数据增强](#1.4) + - [1.4.1 Mixup](#1.4.1) + - [1.4.1.1 Mixup 算法介绍](#1.4.1.1) + - [1.4.1.2 Mixup 配置](#1.4.1.2) + - [1.4.2 Cutmix](#1.4.2) + - [1.4.2.1 Cutmix 算法介绍](#1.4.2.1) + - [1.4.2.2 Cutmix 配置](#1.4.2.2) + - [1.4.2.3 Mixup 和 Cutmix 混合使用配置](#1.4.2.3) +- [2. 模型训练、评估和预测](#2) + - [2.1 环境配置](#2.1) + - [2.2 数据准备](#2.2) + - [2.3 模型训练](#2.3) + - [2.4 模型评估](#2.4) + - [2.5 模型预测](#2.5) +- [3. 参考文献](#4) + -## 1. 参数配置 -由于不同的数据增强方式含有不同的超参数,为了便于理解和使用,我们在 `configs/DataAugment` 里分别列举了 8 种训练 ResNet50 的数据增强方式的参数配置文件,用户可以在 `tools/run.sh` 里直接替换配置文件的路径即可使用。此处分别挑选了图像变换、图像裁剪、图像混叠中的一个示例展示,其他参数配置用户可以自查配置文件。 +## 1. 算法介绍 + +在图像分类任务中,图像数据的增广是一种常用的正则化方法,常用于数据量不足或者模型参数较多的场景。在本章节中,我们将对除 ImageNet 分类任务标准数据增强外的 8 种数据增强方式进行简单的介绍和对比,用户也可以将这些增广方法应用到自己的任务中,以获得模型精度的提升。这 8 种数据增强方式在 ImageNet 上的精度指标如下所示。 + +![](../../images/image_aug/main_image_aug.png) + +更具体的指标如下表所示: + + +| 模型 | 初始学习率策略 | l2 decay | batch size | epoch | 数据变化策略 | Top1 Acc | 论文中结论 | +|-------------|------------------|--------------|------------|-------|----------------|------------|----| +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | 标准变换 | 0.7731 | - | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | AutoAugment | 0.7795 | 0.7763 | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | mixup | 0.7828 | 0.7790 | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | cutmix | 0.7839 | 0.7860 | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | cutout | 0.7801 | - | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | gridmask | 0.7785 | 0.7790 | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | random-augment | 0.7770 | 0.7760 | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | random erasing | 0.7791 | - | +| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | hide and seek | 0.7743 | 0.7720 | -### 1.1 AutoAugment + +### 1.1. 数据增强简介 + +如果没有特殊说明,本章节中所有示例为 ImageNet 分类,并且假设最终输入网络的数据维度为:`[batch-size, 3, 224, 224]` + +其中 ImageNet 分类训练阶段的标准数据增强方式分为以下几个步骤: + +1. 图像解码:简写为 `ImageDecode` +2. 随机裁剪到长宽均为 224 的图像:简写为 `RandCrop` +3. 水平方向随机翻转:简写为 `RandFlip` +4. 图像数据的归一化:简写为 `Normalize` +5. 图像数据的重排,`[224, 224, 3]` 变为 `[3, 224, 224]`:简写为 `Transpose` +6. 多幅图像数据组成 batch 数据,如 `batch-size` 个 `[3, 224, 224]` 的图像数据拼组成 `[batch-size, 3, 224, 224]`:简写为 `Batch` + +相比于上述标准的图像增广方法,研究者也提出了很多改进的图像增广策略,这些策略均是在标准增广方法的不同阶段插入一定的操作,基于这些策略操作所处的不同阶段,我们将其分为了三类: + +1. 对 `RandCrop` 后的 224 的图像进行一些变换: AutoAugment,RandAugment +2. 对 `Transpose` 后的 224 的图像进行一些裁剪: CutOut,RandErasing,HideAndSeek,GridMask +3. 对 `Batch` 后的数据进行混合: Mixup,Cutmix + +增广后的可视化效果如下所示。 + +![](../../images/image_aug/image_aug_samples_s.jpg) + +具体如下表所示: + + +| 变换方法 | 输入 | 输出 | Auto-
Augment\[1\] | RandAugment\[2\] | CutOut\[3\] | RandErasing\[4\] | HideAndSeek\[5\] | GridMask\[6\] | Mixup\[7\] | Cutmix\[8\] |
+|-------------|---------------------------|---------------------------|------------------|------------------|-------------|------------------|------------------|---------------|------------|------------|
+| ImageDecode | Binary | (:, :, 3) uint8 | Y | Y | Y | Y | Y | Y | Y | Y |
+| RandCrop | (:, :, 3) uint8 | (224, 224, 3) uint8 | Y | Y | Y | Y | Y | Y | Y | Y |
+| **Process** | (224, 224, 3) uint8 | (224, 224, 3) uint8 | Y | Y | \- | \- | \- | \- | \- | \- |
+| RandFlip | (224, 224, 3) uint8 | (224, 224, 3) uint8 | Y | Y | Y | Y | Y | Y | Y | Y |
+| Normalize | (224, 224, 3) uint8 | (224, 224, 3) float32 | Y | Y | Y | Y | Y | Y | Y | Y |
+| Transpose | (224, 224, 3) float32 | (3, 224, 224) float32 | Y | Y | Y | Y | Y | Y | Y | Y |
+| **Process** | (3, 224, 224) float32 | (3, 224, 224) float32 | \- | \- | Y | Y | Y | Y | \- | \- |
+| Batch | (3, 224, 224) float32 | (N, 3, 224, 224) float32 | Y | Y | Y | Y | Y | Y | Y | Y |
+| **Process** | (N, 3, 224, 224) float32 | (N, 3, 224, 224)
float32 | \- | \- | \- | \- | \- | \- | Y | Y | + + +PaddleClas 中集成了上述所有的数据增强策略,每种数据增强策略的参考论文与参考开源代码均在下面的介绍中列出。下文将介绍这些策略的原理与使用方法,并以下图为例,对变换后的效果进行可视化。为了说明问题,本章节中将 `RandCrop` 替换为 `Resize`。 + +![][test_baseline] + + + +### 1.2 图像变换类 + +图像变换类指的是对 `RandCrop` 后的 224 的图像进行一些变换,主要包括 + ++ AutoAugment ++ RandAugment ++ TimmAutoAugment + + + +#### 1.2.1 AutoAugment + + + +##### 1.2.1.1 AutoAugment 算法介绍 + +论文地址:[https://arxiv.org/abs/1805.09501v1](https://arxiv.org/abs/1805.09501v1) + +开源代码 github 地址:[https://github.com/DeepVoltaire/AutoAugment](https://github.com/DeepVoltaire/AutoAugment) + +不同于常规的人工设计图像增广方式,AutoAugment 是在一系列图像增广子策略的搜索空间中通过搜索算法找到的适合特定数据集的图像增广方案。针对 ImageNet 数据集,最终搜索出来的数据增强方案包含 25 个子策略组合,每个子策略中都包含两种变换,针对每幅图像都随机的挑选一个子策略组合,然后以一定的概率来决定是否执行子策略中的每种变换。 + +经过 AutoAugment 数据增强后结果如下图所示。 + +![][test_autoaugment] + + + +##### 1.2.1.2 AutoAugment 配置 `AotoAugment` 的图像增广方式的配置如下。`AutoAugment` 是在 uint8 的数据格式上转换的,所以其处理过程应该放在归一化操作(`NormalizeImage`)之前。 @@ -48,8 +164,31 @@ order: '' ``` - -### 1.2 RandAugment + + +#### 1.2.2 RandAugment + + + +##### 1.2.2.1 RandAugment 算法介绍 + +论文地址:[https://arxiv.org/pdf/1909.13719.pdf](https://arxiv.org/pdf/1909.13719.pdf) + +开源代码 github 地址:[https://github.com/heartInsert/randaugment](https://github.com/heartInsert/randaugment) + + +`AutoAugment` 的搜索方法比较暴力,直接在数据集上搜索针对该数据集的最优策略,其计算量很大。在 `RandAugment` 文章中作者发现,一方面,针对越大的模型,越大的数据集,使用 `AutoAugment` 方式搜索到的增广方式产生的收益也就越小;另一方面,这种搜索出的最优策略是针对该数据集的,其迁移能力较差,并不太适合迁移到其他数据集上。 + +在 `RandAugment` 中,作者提出了一种随机增广的方式,不再像 `AutoAugment` 中那样使用特定的概率确定是否使用某种子策略,而是所有的子策略都会以同样的概率被选择到,论文中的实验也表明这种数据增强方式即使在大模型的训练中也具有很好的效果。 + + +经过 RandAugment 数据增强后结果如下图所示。 + +![][test_randaugment] + + + +##### 1.2.2.2 RandAugment 配置 `RandAugment` 的图像增广方式的配置如下,其中用户需要指定其中的参数 `num_layers` 与 `magnitude`,默认的数值分别是 `2` 和 `5`。`RandAugment` 是在 uint8 的数据格式上转换的,所以其处理过程应该放在归一化操作(`NormalizeImage`)之前。 @@ -72,8 +211,21 @@ order: '' ``` - -### 1.3 TimmAutoAugment + + +#### 1.2.3 TimmAutoAugment + + + +##### 1.2.3.1 TimmAutoAugment 算法介绍 + +开源代码 github 地址:[https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py) + +`TimmAutoAugment` 是开源作者对 AutoAugment 和 RandAugment 的改进,事实证明,其在很多视觉任务上有更好的表现,目前绝大多数 VisionTransformer 模型都是基于 TimmAutoAugment 去实现的。 + + + +##### 1.2.3.2 TimmAutoAugment 配置 `TimmAutoAugment` 的图像增广方式的配置如下,其中用户需要指定其中的参数 `config_str`、`interpolation`、`img_size`,默认的数值分别是 `rand-m9-mstd0.5-inc1`、`bicubic`、`224`。`TimmAutoAugment` 是在 uint8 的数据格式上转换的,所以其处理过程应该放在归一化操作(`NormalizeImage`)之前。 @@ -97,8 +249,43 @@ order: '' ``` - -### 1.4 Cutout + + +### 1.3 图像裁剪类 + +图像裁剪类主要是对 `Transpose` 后的 224 的图像进行一些裁剪,并将裁剪区域的像素值置为特定的常数(默认为 0),主要包括: + ++ CutOut ++ RandErasing ++ HideAndSeek ++ GridMask + +图像裁剪的这些增广并非一定要放在归一化之后,也有不少实现是放在归一化之前的,也就是直接对 uint8 的图像进行操作,两种方式的差别是:如果直接对 uint8 的图像进行操作,那么再经过归一化之后被裁剪的区域将不再是纯黑或纯白(减均值除方差之后像素值不为 0)。而对归一后之后的数据进行操作,裁剪的区域会是纯黑或纯白。 + +上述的裁剪变换思路是相同的,都是为了解决训练出的模型在有遮挡数据上泛化能力较差的问题,不同的是他们的裁剪方式、区域不太一样。 + + + +#### 1.3.1 Cutout + + + +##### 1.3.1.1 Cutout 算法介绍 + +论文地址:[https://arxiv.org/abs/1708.04552](https://arxiv.org/abs/1708.04552) + +开源代码 github 地址:[https://github.com/uoguelph-mlrg/Cutout](https://github.com/uoguelph-mlrg/Cutout) + +Cutout 可以理解为 Dropout 的一种扩展操作,不同的是 Dropout 是对图像经过网络后生成的特征进行遮挡,而 Cutout 是直接对输入的图像进行遮挡,相对于 Dropout 对噪声的鲁棒性更好。作者在论文中也进行了说明,这样做法有以下两点优势:(1)通过 Cutout 可以模拟真实场景中主体被部分遮挡时的分类场景;(2)可以促进模型充分利用图像中更多的内容来进行分类,防止网络只关注显著性的图像区域,从而发生过拟合。 + + +经过 RandAugment 数据增强后结果如下图所示。 + +![][test_cutout] + + + +##### 1.3.1.2 Cutout 配置 `Cutout` 
的图像增广方式的配置如下,其中用户需要指定其中的参数 `n_holes` 与 `length`,默认的数值分别是 `1` 和 `112`。类似其他图像裁剪类的数据增强方式,`Cutout` 既可以在 uint8 格式的数据上操作,也可以在归一化)(`NormalizeImage`)后的数据上操作,此处给出的是在归一化后的操作。 @@ -121,8 +308,31 @@ length: 112 ``` - -### 1.5 RandomErasing + + + +#### 1.3.2 RandomErasing + + + +##### 1.3.2.1 RandomErasing 算法介绍 + +论文地址:[https://arxiv.org/pdf/1708.04896.pdf](https://arxiv.org/pdf/1708.04896.pdf) + +开源代码 github 地址:[https://github.com/zhunzhong07/Random-Erasing](https://github.com/zhunzhong07/Random-Erasing) + +`RandomErasing` 与 `Cutout` 方法类似,同样是为了解决训练出的模型在有遮挡数据上泛化能力较差的问题,作者在论文中也指出,随机裁剪的方式与随机水平翻转具有一定的互补性。作者也在行人再识别(REID)上验证了该方法的有效性。与 `Cutout` 不同的是,在 `RandomErasing` 中,图片以一定的概率接受该种预处理方法,生成掩码的尺寸大小与长宽比也是根据预设的超参数随机生成。 + + +PaddleClas 中 `RandomErasing` 的使用方法如下所示。 + +经过 RandomErasing 数据增强后结果如下图所示。 + +![][test_randomerassing] + + + +##### 1.3.2.2 RandomErasing 配置 `RandomErasing` 的图像增广方式的配置如下,其中用户需要指定其中的参数 `EPSILON`、`sl`、`sh`、`r1`、`attempt`、`use_log_aspect`、`mode`,默认的数值分别是 `0.25`、`0.02`、`1.0/3.0`、`0.3`、`10`、`True`、`pixel`。类似其他图像裁剪类的数据增强方式,`RandomErasing` 既可以在 uint8 格式的数据上操作,也可以在归一化(`NormalizeImage`)后的数据上操作,此处给出的是在归一化后的操作。 @@ -150,8 +360,35 @@ mode: pixel ``` - -### 1.6 HideAndSeek + + +#### 1.3.3 HideAndSeek + + + +##### 1.3.3.1 HideAndSeek 算法介绍 + +论文地址:[https://arxiv.org/pdf/1811.02545.pdf](https://arxiv.org/pdf/1811.02545.pdf) + +开源代码 github 地址:[https://github.com/kkanshul/Hide-and-Seek](https://github.com/kkanshul/Hide-and-Seek) + + +`HideAndSeek` 论文将图像分为若干块区域(patch),对于每块区域,都以一定的概率生成掩码,不同区域的掩码含义如下图所示。 + + +![][hide_and_seek_mask_expanation] + + +PaddleClas 中 `HideAndSeek` 的使用方法如下所示。 + + +经过 HideAndSeek 数据增强后结果如下图所示。 + +![][test_hideandseek] + + + +##### 1.3.3.2 HideAndSeek 配置 `HideAndSeek` 的图像增广方式的配置如下。类似其他图像裁剪类的数据增强方式,`HideAndSeek` 既可以在 uint8 格式的数据上操作,也可以在归一化(`NormalizeImage`)后的数据上操作,此处给出的是在归一化后的操作。 @@ -172,9 +409,43 @@ - HideAndSeek: ``` - + + +#### 1.3.4 GridMask + + + +##### 1.3.4.1 GridMask 算法介绍 + +论文地址:[https://arxiv.org/abs/2001.04086](https://arxiv.org/abs/2001.04086) + +开源代码 github 地址:[https://github.com/akuxcw/GridMask](https://github.com/akuxcw/GridMask) -### 1.7 GridMask + +作者在论文中指出,此前存在的基于对图像 crop 的方法存在两个问题,如下图所示: + +1. 过度删除区域可能造成目标主体大部分甚至全部被删除,或者导致上下文信息的丢失,导致增广后的数据成为噪声数据; +2. 保留过多的区域,对目标主体及上下文基本产生不了什么影响,失去增广的意义。 + +![][gridmask-0] + +因此如果避免过度删除或过度保留成为需要解决的核心问题。 + + +`GridMask` 是通过生成一个与原图分辨率相同的掩码,并将掩码进行随机翻转,与原图相乘,从而得到增广后的图像,通过超参数控制生成的掩码网格的大小。 + + +在训练过程中,有两种以下使用方法: +1. 设置一个概率 p,从训练开始就对图片以概率 p 使用 `GridMask` 进行增广。 +2. 
一开始设置增广概率为 0,随着迭代轮数增加,对训练图片进行 `GridMask` 增广的概率逐渐增大,最后变为 p。 + +论文中验证上述第二种方法的训练效果更好一些。 + +经过 GridMask 数据增强后结果如下图所示。 + + + +##### 1.3.4.2 GridMask 配置 `GridMask` 的图像增广方式的配置如下,其中用户需要指定其中的参数 `d1`、`d2`、`rotate`、`ratio`、`mode`, 默认的数值分别是 `96`、`224`、`1`、`0.5`、`0`。类似其他图像裁剪类的数据增强方式,`GridMask` 既可以在 uint8 格式的数据上操作,也可以在归一化(`NormalizeImage`)后的数据上操作,此处给出的是在归一化后的操作。 @@ -200,8 +471,43 @@ mode: 0 ``` - -### 1.8 Mixup +![][test_gridmask] + + + +### 1.4 图像混叠类 + +图像混叠主要对 `Batch` 后的数据进行混合,包括: + ++ Mixup ++ Cutmix + +前文所述的图像变换与图像裁剪都是针对单幅图像进行的操作,而图像混叠是对两幅图像进行融合,生成一幅图像,两种方法的主要区别为混叠的方式不太一样。 + + + +#### 1.4.1 Mixup + + + +##### 1.4.1.1 Mixup 算法介绍 + +论文地址:[https://arxiv.org/pdf/1710.09412.pdf](https://arxiv.org/pdf/1710.09412.pdf) + +开源代码 github 地址:[https://github.com/facebookresearch/mixup-cifar10](https://github.com/facebookresearch/mixup-cifar10) + +Mixup 是最先提出的图像混叠增广方案,其原理简单、方便实现,不仅在图像分类上,在目标检测上也取得了不错的效果。为了便于实现,通常只对一个 batch 内的数据进行混叠,在 `Cutmix` 中也是如此。 + +如下是 `imaug` 中的实现,需要指出的是,下述实现会出现对同一幅进行相加的情况,也就是最终得到的图和原图一样,随着 `batch-size` 的增加这种情况出现的概率也会逐渐减小。 + + +经过 Mixup 数据增强结果如下图所示。 + +![][test_mixup] + + + +##### 1.4.1.2 Mixup 配置 `Mixup` 的图像增广方式的配置如下,其中用户需要指定其中的参数 `alpha`,默认的数值是 `0.2`。类似其他图像混合类的数据增强方式,`Mixup` 是在图像做完数据处理后将每个 batch 内的数据做图像混叠,将混叠后的图像和标签输入网络中训练,所以其是在图像数据处理(图像变换、图像裁剪)后操作。 @@ -224,8 +530,26 @@ alpha: 0.2 ``` - -### 1.9 Cutmix + +#### 1.4.2 Cutmix + + + +##### 1.4.2.1 Cutmix 算法介绍 + +论文地址:[https://arxiv.org/pdf/1905.04899v2.pdf](https://arxiv.org/pdf/1905.04899v2.pdf) + +开源代码 github 地址:[https://github.com/clovaai/CutMix-PyTorch](https://github.com/clovaai/CutMix-PyTorch) + +与 `Mixup` 直接对两幅图进行相加不一样,`Cutmix` 是从一幅图中随机裁剪出一个 `ROI`,然后覆盖当前图像中对应的区域,代码实现如下所示: + +经过 Cutmix 数据增强后结果如下图所示。 + +![][test_cutmix] + + + +##### 1.4.2.2 Cutmix 配置 `Cutmix` 的图像增广方式的配置如下,其中用户需要指定其中的参数 `alpha`,默认的数值是 `0.2`。类似其他图像混合类的数据增强方式,`Cutmix` 是在图像做完数据处理后将每个 batch 内的数据做图像混叠,将混叠后的图像和标签输入网络中训练,所以其是在图像数据处理(图像变换、图像裁剪)后操作。 @@ -248,8 +572,9 @@ alpha: 0.2 ``` - -### 1.10 Mixup 与 Cutmix 同时使用 + + +##### 1.4.2.3 Mixup 和 Cutmix 混合使用配置 `Mixup` 与 `Cutmix` 同时使用的配置如下,其中用户需要指定额外的参数 `prob`,该参数控制不同数据增强的概率,默认为 `0.5`。 @@ -277,55 +602,149 @@ ``` -## 2. 启动命令 -当用户配置完训练环境后,类似于训练其他分类任务,只需要将 `tools/train.sh` 中的配置文件替换成为相应的数据增强方式的配置文件即可。 +## 2. 模型训练、评估和预测 + + -其中 `train.sh` 中的内容如下: +### 2.1 环境配置 -```bash +* 安装:请先参考 [Paddle 安装教程](../installation/install_paddle.md) 以及 [PaddleClas 安装教程](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 2.2 数据准备 + +请在[ImageNet 官网](https://www.image-net.org/)准备 ImageNet-1k 相关的数据。 + + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,将下载好的数据命名为 `ILSVRC2012` ,存放于此。 `ILSVRC2012` 目录中具有以下数据: + +``` +├── train +│   ├── n01440764 +│   │   ├── n01440764_10026.JPEG +│   │   ├── n01440764_10027.JPEG +├── train_list.txt +... 
+├── val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +├── val_list.txt +``` + +其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件。 + +**备注:** +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + + +### 2.3 模型训练 + + +在 `ppcls/configs/ImageNet/DataAugment` 中提供了基于 ResNet50 的不同的数据增强的训练配置,这里以使用 `AutoAugment` 为例,介绍数据增强的使用方法。可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 python3 -m paddle.distributed.launch \ - --selected_gpus="0,1,2,3" \ - --log_dir=ResNet50_Cutout \ + --gpus="0,1,2,3" \ tools/train.py \ - -c ./ppcls/configs/ImageNet/DataAugment/ResNet50_Cutout.yaml + -c ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml ``` -运行 `train.sh`: + +**备注:** + +* 1.当前精度最佳的模型会保存在 `output/ResNet50/best_model.pdparams`。 +* 2.如需更改数据增强类型,只需要替换`ppcls/configs/ImageNet/DataAugment`中的其他的配置文件即可。 +* 3.如果希望多种数据增强混合使用,请参考[第 2 节](#2)中的相关配置更改配置文件中的数据增强即可。 +* 4.由于图像混叠时需对 label 进行混叠,无法计算训练数据的准确率,所以在训练过程中没有打印训练准确率。 +* 5.在使用数据增强后,由于训练数据更难,所以训练损失函数可能较大,训练集的准确率相对较低,但其有拥更好的泛化能力,所以验证集的准确率相对较高。 +* 6.在使用数据增强后,模型可能会趋于欠拟合状态,建议可以适当的调小 `l2_decay` 的值来获得更高的验证集准确率。 +* 7.几乎每一类图像增强均含有超参数,我们只提供了基于 ImageNet-1k 的超参数,其他数据集需要用户自己调试超参数,具体超参数的含义用户可以阅读相关的论文,调试方法也可以参考[训练技巧](../models_training/train_strategy.md)。 + + + +### 2.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 ```bash -sh tools/train.sh +python3 tools/eval.py \ + -c ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml \ + -o Global.pretrained_model=output/ResNet50/best_model ``` +其中 `-o Global.pretrained_model="output/ResNet50/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 2.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ppcls/configs/ImageNet/DataAugment/ResNet50_AutoAugment.yaml \ + -o Global.pretrained_model=output/ResNet50/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [8, 7, 86, 81, 85], 'scores': [0.91347, 0.03779, 0.0036, 0.00117, 0.00112], 'file_name': 'docs/images/inference_deployment/whl_demo.jpg', 'label_names': ['hen', 'cock', 'partridge', 'ptarmigan', 'quail']}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/ResNet50/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `docs/images/inference_deployment/whl_demo.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + +* 默认输出的是 Top-5 的值,如果希望输出 Top-k 的值,可以指定`-o Infer.PostProcess.topk=k`,其中,`k` 为您指定的值。 + + + -## 3. 注意事项 -* 由于图像混叠时需对 label 进行混叠,无法计算训练数据的准确率,所以在训练过程中没有打印训练准确率。 +## 3.参考文献 -* 在使用数据增强后,由于训练数据更难,所以训练损失函数可能较大,训练集的准确率相对较低,但其有拥更好的泛化能力,所以验证集的准确率相对较高。 +[1] Cubuk E D, Zoph B, Mane D, et al. Autoaugment: Learning augmentation strategies from data[C]//Proceedings of the IEEE conference on computer vision and pattern recognition. 2019: 113-123. -* 在使用数据增强后,模型可能会趋于欠拟合状态,建议可以适当的调小 `l2_decay` 的值来获得更高的验证集准确率。 -* 几乎每一类图像增强均含有超参数,我们只提供了基于 ImageNet-1k 的超参数,其他数据集需要用户自己调试超参数,具体超参数的含义用户可以阅读相关的论文,调试方法也可以参考[训练技巧](../models_training/train_strategy.md)。 +[2] Cubuk E D, Zoph B, Shlens J, et al. Randaugment: Practical automated data augmentation with a reduced search space[J]. arXiv preprint arXiv:1909.13719, 2019. - -## 4. 实验结果 +[3] DeVries T, Taylor G W. Improved regularization of convolutional neural networks with cutout[J]. arXiv preprint arXiv:1708.04552, 2017. + +[4] Zhong Z, Zheng L, Kang G, et al. Random erasing data augmentation[J]. arXiv preprint arXiv:1708.04896, 2017. + +[5] Singh K K, Lee Y J. 
Hide-and-seek: Forcing a network to be meticulous for weakly-supervised object and action localization[C]//2017 IEEE international conference on computer vision (ICCV). IEEE, 2017: 3544-3553. + +[6] Chen P. GridMask Data Augmentation[J]. arXiv preprint arXiv:2001.04086, 2020. + +[7] Zhang H, Cisse M, Dauphin Y N, et al. mixup: Beyond empirical risk minimization[J]. arXiv preprint arXiv:1710.09412, 2017. + +[8] Yun S, Han D, Oh S J, et al. Cutmix: Regularization strategy to train strong classifiers with localizable features[C]//Proceedings of the IEEE International Conference on Computer Vision. 2019: 6023-6032. -基于 PaddleClas,在 ImageNet1k 数据集上的分类精度如下。 -| 模型 | 初始学习率策略 | l2 decay | batch size | epoch | 数据变化策略 | Top1 Acc | 论文中结论 | -|-------------|------------------|--------------|------------|-------|----------------|------------|----| -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | 标准变换 | 0.7731 | - | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | AutoAugment | 0.7795 | 0.7763 | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | mixup | 0.7828 | 0.7790 | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | cutmix | 0.7839 | 0.7860 | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | cutout | 0.7801 | - | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | gridmask | 0.7785 | 0.7790 | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | random-augment | 0.7770 | 0.7760 | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | random erasing | 0.7791 | - | -| ResNet50 | 0.1/cosine_decay | 0.0001 | 256 | 300 | hide and seek | 0.7743 | 0.7720 | -**注意**: -* 在这里的实验中,为了便于对比,我们将 l2 decay 固定设置为 1e-4,在实际使用中,我们推荐尝试使用更小的 l2 decay。结合数据增强,我们发现将 l2 decay 由 1e-4 减小为 7e-5 均能带来至少 0.3~0.5% 的精度提升。 -* 我们目前尚未对不同策略进行组合并验证效果,这一块后续我们会开展更多的对比实验,敬请期待。 +[test_baseline]: ../../images/image_aug/test_baseline.jpeg +[test_autoaugment]: ../../images/image_aug/test_autoaugment.jpeg +[test_cutout]: ../../images/image_aug/test_cutout.jpeg +[test_gridmask]: ../../images/image_aug/test_gridmask.jpeg +[gridmask-0]: ../../images/image_aug/gridmask-0.png +[test_hideandseek]: ../../images/image_aug/test_hideandseek.jpeg +[test_randaugment]: ../../images/image_aug/test_randaugment.jpeg +[test_randomerassing]: ../../images/image_aug/test_randomerassing.jpeg +[hide_and_seek_mask_expanation]: ../../images/image_aug/hide-and-seek-visual.png +[test_mixup]: ../../images/image_aug/test_mixup.png +[test_cutmix]: ../../images/image_aug/test_cutmix.png diff --git a/docs/zh_CN/advanced_tutorials/knowledge_distillation.md b/docs/zh_CN/advanced_tutorials/knowledge_distillation.md index d3e6d77cf254a933fd6e6776e361f2c499b5c14d..18bb25f2ef0237da8292f4e182ff7919e777b8ce 100644 --- a/docs/zh_CN/advanced_tutorials/knowledge_distillation.md +++ b/docs/zh_CN/advanced_tutorials/knowledge_distillation.md @@ -1,209 +1,412 @@ -# 知识蒸馏 +# 知识蒸馏实战 ## 目录 - - [1. 模型压缩与知识蒸馏方法简介](#1) - - [2. SSLD 蒸馏策略](#2) - - [2.1 简介](#2.1) - - [2.2 数据选择](#2.2) - - [3. 实验](#3) - - [3.1 教师模型的选择](#3.1) - - [3.2 大数据蒸馏](#3.2) - - [3.3 ImageNet1k 训练集 finetune](#3.3) - - [3.4 数据增广以及基于 Fix 策略的微调](#3.4) - - [3.5 实验过程中的一些问题](#3.5) - - [4. 蒸馏模型的应用](#4) - - [4.1 使用方法](#4.1) - - [4.2 迁移学习 finetune](#4.2) - - [4.3 目标检测](#4.3) - - [5. SSLD 实战](#5) - - [5.1 参数配置](#5.1) - - [5.2 启动命令](#5.2) - - [5.3 注意事项](#5.3) - - [6. 参考文献](#6) + +- [1. 
算法介绍](#1) + - [1.1 知识蒸馏简介](#1.1) + - [1.1.1 Response based distillation](#1.1.1) + - [1.1.2 Feature based distillation](#1.1.2) + - [1.1.3 Relation based distillation](#1.1.3) + - [1.2 PaddleClas支持的知识蒸馏算法](#1.2) + - [1.2.1 SSLD](#1.2.1) + - [1.2.2 DML](#1.2.2) + - [1.2.3 UDML](#1.2.3) + - [1.2.4 AFD](#1.2.4) + - [1.2.5 DKD](#1.2.5) + - [1.2.6 DIST](#1.2.6) +- [2. 使用方法](#2) + - [2.1 环境配置](#2.1) + - [2.2 数据准备](#2.2) + - [2.3 模型训练](#2.3) + - [2.4 模型评估](#2.4) + - [2.5 模型预测](#2.5) + - [2.6 模型导出与推理](#2.6) +- [3. 参考文献](#3) + + -## 1. 模型压缩与知识蒸馏方法简介 + +## 1. 算法介绍 + + + +### 1.1 知识蒸馏简介 近年来,深度神经网络在计算机视觉、自然语言处理等领域被验证是一种极其有效的解决问题的方法。通过构建合适的神经网络,加以训练,最终网络模型的性能指标基本上都会超过传统算法。 在数据量足够大的情况下,通过合理构建网络模型的方式增加其参数量,可以显著改善模型性能,但是这又带来了模型复杂度急剧提升的问题。大模型在实际场景中使用的成本较高。 -深度神经网络一般有较多的参数冗余,目前有几种主要的方法对模型进行压缩,减小其参数量。如裁剪、量化、知识蒸馏等,其中知识蒸馏是指使用教师模型(teacher model)去指导学生模型(student model)学习特定任务,保证小模型在参数量不变的情况下,得到比较大的性能提升,甚至获得与大模型相似的精度指标 [1]。 PaddleClas 融合已有的蒸馏方法 [2,3],提供了一种简单的半监督标签知识蒸馏方案(SSLD,Simple Semi-supervised Label Distillation),基于 ImageNet1k 分类数据集,在 ResNet_vd 以及 MobileNet 系列上的精度均有超过 3% 的绝对精度提升,具体指标如下图所示。 +深度神经网络一般有较多的参数冗余,目前有几种主要的方法对模型进行压缩,减小其参数量。如裁剪、量化、知识蒸馏等,其中知识蒸馏是指使用教师模型(teacher model)去指导学生模型(student model)学习特定任务,保证小模型在参数量不变的情况下,得到比较大的性能提升,甚至获得与大模型相似的精度指标 [1]。 -![](../../images/distillation/distillation_perform_s.jpg) - -## 2. SSLD 蒸馏策略 +根据蒸馏方式的不同,可以将知识蒸馏方法分为3个不同的类别:Response based distillation、Feature based distillation、Relation based distillation。下面进行详细介绍。 - -### 2.1 简介 + -SSLD 的流程图如下图所示。 +#### 1.1.1 Response based distillation -![](../../images/distillation/ppcls_distillation.png) -首先,我们从 ImageNet22k 中挖掘出了近 400 万张图片,同时与 ImageNet-1k 训练集整合在一起,得到了一个新的包含 500 万张图片的数据集。然后,我们将学生模型与教师模型组合成一个新的网络,该网络分别输出学生模型和教师模型的预测分布,与此同时,固定教师模型整个网络的梯度,而学生模型可以做正常的反向传播。最后,我们将两个模型的 logits 经过 softmax 激活函数转换为 soft label,并将二者的 soft label 做 JS 散度作为损失函数,用于蒸馏模型训练。下面以 MobileNetV3(该模型直接训练,精度为 75.3%)的知识蒸馏为例,介绍该方案的核心关键点(baseline 为 79.12% 的 ResNet50_vd 模型蒸馏 MobileNetV3,训练集为 ImageNet1k 训练集,loss 为 cross entropy loss,迭代轮数为 120epoch,精度指标为 75.6%)。 +最早的知识蒸馏算法 KD,由 Hinton 提出,训练的损失函数中除了 gt loss 之外,还引入了学生模型与教师模型输出的 KL 散度,最终精度超过单纯使用 gt loss 训练的精度。这里需要注意的是,在训练的时候,需要首先训练得到一个更大的教师模型,来指导学生模型的训练过程。 -* 教师模型的选择。在进行知识蒸馏时,如果教师模型与学生模型的结构差异太大,蒸馏得到的结果反而不会有太大收益。相同结构下,精度更高的教师模型对结果也有很大影响。相比于 79.12% 的 ResNet50_vd 教师模型,使用 82.4% 的 ResNet50_vd 教师模型可以带来 0.4% 的绝对精度收益(`75.6%->76.0%`)。 +PaddleClas 中提出了一种简单使用的 SSLD 知识蒸馏算法 [6],在训练的时候去除了对 gt label 的依赖,结合大量无标注数据,最终蒸馏训练得到的预训练模型在 15 个模型上的精度提升平均高达 3%。 -* 改进 loss 计算方法。分类 loss 计算最常用的方法就是 cross entropy loss,我们经过实验发现,在使用 soft label 进行训练时,相对于 cross entropy loss,KL div loss 对模型性能提升几乎无帮助,但是使用具有对称特性的 JS div loss 时,在多个蒸馏任务上相比 cross entropy loss 均有 0.2% 左右的收益(`76.0%->76.2%`),SSLD 中也基于 JS div loss 展开实验。 +上述标准的蒸馏方法是通过一个大模型作为教师模型来指导学生模型提升效果,而后来又发展出 DML(Deep Mutual Learning)互学习蒸馏方法 [7],即通过两个结构相同的模型互相学习。具体的。相比于 KD 等依赖于大的教师模型的知识蒸馏算法,DML 脱离了对大的教师模型的依赖,蒸馏训练的流程更加简单,模型产出效率也要更高一些。 -* 更多的迭代轮数。蒸馏的 baseline 实验只迭代了 120 个 epoch 。实验发现,迭代轮数越多,蒸馏效果越好,最终我们迭代了 360 epoch,精度指标可以达到 77.1%(`76.2%->77.1%`)。 + -* 无需数据集的真值标签,很容易扩展训练集。 SSLD 的 loss 在计算过程中,仅涉及到教师和学生模型对于相同图片的处理结果(经过 softmax 激活函数处理之后的 soft label),因此即使图片数据不包含真值标签,也可以用来进行训练并提升模型性能。该蒸馏方案的无标签蒸馏策略也大大提升了学生模型的性能上限(`77.1%->78.5%`)。 +#### 1.1.2 Feature based distillation -* ImageNet1k 蒸馏 finetune 。 我们仅使用 ImageNet1k 数据,使用蒸馏方法对上述模型进行 finetune,最终仍然可以获得 0.4% 的性能提升(`78.5%->78.9%`)。 +Heo 等人提出了 OverHaul [8], 计算学生模型与教师模型的 feature map distance,作为蒸馏的 loss,在这里使用了学生模型、教师模型的转移,来保证二者的 feature map 可以正常地进行 distance 的计算。 +基于 feature map distance 的知识蒸馏方法也能够和 `3.1 章节` 中的基于 response 的知识蒸馏算法融合在一起,同时对学生模型的输出结果和中间层 feature map 进行监督。而对于 DML 
方法来说,这种融合过程更为简单,因为不需要对学生和教师模型的 feature map 进行转换,便可以完成对齐(alignment)过程。PP-OCRv2 系统中便使用了这种方法,最终大幅提升了 OCR 文字识别模型的精度。 - -### 2.2 数据选择 + -* SSLD 蒸馏方案的一大特色就是无需使用图像的真值标签,因此可以任意扩展数据集的大小,考虑到计算资源的限制,我们在这里仅基于 ImageNet22k 数据集对蒸馏任务的训练集进行扩充。在 SSLD 蒸馏任务中,我们使用了 `Top-k per class` 的数据采样方案 [3] 。具体步骤如下。 - * 训练集去重。我们首先基于 SIFT 特征相似度匹配的方式对 ImageNet22k 数据集与 ImageNet1k 验证集进行去重,防止添加的 ImageNet22k 训练集中包含 ImageNet1k 验证集图像,最终去除了 4511 张相似图片。部分过滤的相似图片如下所示。 +#### 1.1.3 Relation based distillation - ![](../../images/distillation/22k_1k_val_compare_w_sift.png) +[1.1.1](#1.1.1) 和 [1.1.2](#1.1.2) 章节中的论文中主要是考虑到学生模型与教师模型的输出或者中间层 feature map,这些知识蒸馏算法只关注个体的输出结果,没有考虑到个体之间的输出关系。 - * 大数据集 soft label 获取,对于去重后的 ImageNet22k 数据集,我们使用 `ResNeXt101_32x16d_wsl` 模型进行预测,得到每张图片的 soft label 。 - * Top-k 数据选择,ImageNet1k 数据共有 1000 类,对于每一类,找出属于该类并且得分最高的 `k` 张图片,最终得到一个数据量不超过 `1000*k` 的数据集(某些类上得到的图片数量可能少于 `k` 张)。 - * 将该数据集与 ImageNet1k 的训练集融合组成最终蒸馏模型所使用的数据集,数据量为 500 万。 +Park 等人提出了 RKD [10],基于关系的知识蒸馏算法,RKD 中进一步考虑个体输出之间的关系,使用 2 种损失函数,二阶的距离损失(distance-wise)和三阶的角度损失(angle-wise) - -## 3. 实验 -* PaddleClas 的蒸馏策略为`大数据集训练 + ImageNet1k 蒸馏 finetune` 的策略。选择合适的教师模型,首先在挑选得到的 500 万数据集上进行训练,然后在 ImageNet1k 训练集上进行 finetune,最终得到蒸馏后的学生模型。 +本论文提出的算法关系知识蒸馏(RKD)迁移教师模型得到的输出结果间的结构化关系给学生模型,不同于之前的只关注个体输出结果,RKD 算法使用两种损失函数:二阶的距离损失(distance-wise)和三阶的角度损失(angle-wise)。在最终计算蒸馏损失函数的时候,同时考虑 KD loss 和 RKD loss。最终精度优于单独使用 KD loss 蒸馏得到的模型精度。 + + + +### 1.2 PaddleClas支持的知识蒸馏算法 + + - -### 3.1 教师模型的选择 +#### 1.2.1 SSLD + +##### 1.2.1.1 SSLD 算法介绍 + +论文信息: + +> [Beyond Self-Supervision: A Simple Yet Effective Network Distillation Alternative to Improve Backbones +](https://arxiv.org/abs/2103.05959) +> +> Cheng Cui, Ruoyu Guo, Yuning Du, Dongliang He, Fu Li, Zewu Wu, Qiwen Liu, Shilei Wen, Jizhou Huang, Xiaoguang Hu, Dianhai Yu, Errui Ding, Yanjun Ma +> +> arxiv, 2021 + +SSLD是百度于2021年提出的一种简单的半监督知识蒸馏方案,通过设计一种改进的JS散度作为损失函数,结合基于ImageNet22k数据集的数据挖掘策略,最终帮助15个骨干网络模型的精度平均提升超过3%。 + +更多关于SSLD的原理、模型库与使用介绍,请参考:[SSLD知识蒸馏算法介绍](./ssld.md)。 + + +##### 1.2.1.2 SSLD 配置 + +SSLD配置如下所示。在模型构建Arch字段中,需要同时定义学生模型与教师模型,教师模型固定梯度,并且加载预训练参数。在损失函数Loss字段中,需要定义`DistillationDMLLoss`,作为训练的损失函数。 + +```yaml +# model architecture +Arch: + name: "DistillationModel" # 模型名称,这里使用的是蒸馏模型, + class_num: &class_num 1000 # 类别数量,对于ImageNet1k数据集来说,类别数为1000 + pretrained_list: # 预训练模型列表,因为在下面的子网络中指定了预训练模型,这里无需指定 + freeze_params_list: # 固定网络参数列表,为True时,表示固定该index对应的网络 + - True + - False + infer_model_name: "Student" # 在模型导出的时候,会导出Student子网络 + models: # 子网络列表 + - Teacher: # 教师模型 + name: ResNet50_vd # 模型名称 + class_num: *class_num # 类别数 + pretrained: True # 预训练模型路径,如果为True,则会从官网下载默认的预训练模型 + use_ssld: True # 是否使用SSLD蒸馏得到的预训练模型,精度会更高一些 + - Student: # 学生模型 + name: PPLCNet_x2_5 # 模型名称 + class_num: *class_num # 类别数 + pretrained: False # 预训练模型路径,可以指定为bool值或者字符串,这里为False,表示学生模型默认不加载预训练模型 + +# loss function config for traing/eval process +Loss: # 定义损失函数 + Train: # 定义训练的损失函数,为列表形式 + - DistillationDMLLoss: # 蒸馏的DMLLoss,对DMLLoss进行封装,支持蒸馏结果(dict形式)的损失函数计算 + weight: 1.0 # loss权重 + model_name_pairs: # 用于计算的模型对,这里表示计算Student和Teacher输出的损失函数 + - ["Student", "Teacher"] + Eval: # 定义评估时的损失函数 + - CELoss: + weight: 1.0 +``` -为了验证教师模型和学生模型的模型大小差异和教师模型的模型精度对蒸馏结果的影响,我们做了几组实验验证。训练策略统一为:`cosine_decay_warmup,lr=1.3, epoch=120, bs=2048`,学生模型均为从头训练。 + -|Teacher Model | Teacher Top1 | Student Model | Student Top1| -|- |:-: |:-: | :-: | -| ResNeXt101_32x16d_wsl | 84.2% | MobileNetV3_large_x1_0 | 75.78% | -| ResNet50_vd | 79.12% | MobileNetV3_large_x1_0 | 75.60% | -| ResNet50_vd | 82.35% | MobileNetV3_large_x1_0 | 76.00% | +#### 1.2.2 DML +##### 1.2.2.1 DML 算法介绍 -从表中可以看出 +论文信息: -> 
-为了验证教师模型和学生模型的模型大小差异和教师模型的模型精度对蒸馏结果的影响,我们做了几组实验验证。训练策略统一为:`cosine_decay_warmup,lr=1.3, epoch=120, bs=2048`,学生模型均为从头训练。

-|Teacher Model | Teacher Top1 | Student Model | Student Top1|
-|- |:-: |:-: | :-: |
-| ResNeXt101_32x16d_wsl | 84.2% | MobileNetV3_large_x1_0 | 75.78% |
-| ResNet50_vd | 79.12% | MobileNetV3_large_x1_0 | 75.60% |
-| ResNet50_vd | 82.35% | MobileNetV3_large_x1_0 | 76.00% |

#### 1.2.2 DML

##### 1.2.2.1 DML 算法介绍

-从表中可以看出

论文信息:

-> 教师模型结构相同时,其精度越高,最终的蒸馏效果也会更好一些。

> [Deep Mutual Learning](https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_Deep_Mutual_Learning_CVPR_2018_paper.html)
>
> Ying Zhang, Tao Xiang, Timothy M. Hospedales, Huchuan Lu
>
-> 教师模型与学生模型的模型大小差异不宜过大,否则反而会影响蒸馏结果的精度。
> CVPR, 2018

DML论文中,在蒸馏的过程中,不依赖于教师模型,两个结构相同的模型互相学习,计算彼此输出(logits)的KL散度,最终完成训练过程。

在ImageNet1k公开数据集上,效果如下所示。

| 策略 | 骨干网络 | 配置文件 | Top-1 acc | 下载链接 |
| --- | --- | --- | --- | --- |
| baseline | PPLCNet_x2_5 | [PPLCNet_x2_5.yaml](../../../ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml) | 74.93% | - |
| DML | PPLCNet_x2_5 | [PPLCNet_x2_5_dml.yaml](../../../ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml) | 76.68%(**+1.75%**) | - |

* 注:完整的PPLCNet_x2_5模型训练了360epoch,这里为了方便对比,baseline和DML均训练了100epoch,因此指标比官网最终开源出来的模型精度(76.60%)低一些。


##### 1.2.2.2 DML 配置

DML配置如下所示。在模型构建Arch字段中,需要同时定义学生模型与教师模型,教师模型与学生模型均保持梯度更新状态。在损失函数Loss字段中,需要定义`DistillationDMLLoss`(学生与教师之间的JS-Div loss)以及`DistillationGTCELoss`(学生与教师关于真值标签的CE loss),作为训练的损失函数。

```yaml
Arch:
  name: "DistillationModel"
  class_num: &class_num 1000
  pretrained_list:
  freeze_params_list: # 两个模型互相学习,因此这里两个子网络的参数均不能固定
  - False
  - False
  models:
    - Teacher:
        name: PPLCNet_x2_5 # 两个模型互学习,因此均没有加载预训练模型
        class_num: *class_num
        pretrained: False
    - Student:
        name: PPLCNet_x2_5
        class_num: *class_num
        pretrained: False

Loss:
  Train:
    - DistillationGTCELoss: # 因为2个子网络均没有加载预训练模型,这里需要同时计算不同子网络的输出与真值标签之间的CE loss
        weight: 1.0
        model_names: ["Student", "Teacher"]
    - DistillationDMLLoss:
        weight: 1.0
        model_name_pairs:
        - ["Student", "Teacher"]
  Eval:
    - CELoss:
        weight: 1.0
```


#### 1.2.3 UDML

##### 1.2.3.1 UDML 算法介绍

论文信息:

UDML 是百度飞桨视觉团队提出的无需依赖教师模型的知识蒸馏算法,它基于DML进行改进,在蒸馏的过程中,除了考虑两个模型的输出信息,也考虑两个模型的中间层特征信息,从而进一步提升知识蒸馏的精度。更多关于UDML的说明与应用,请参考[PP-ShiTu论文](https://arxiv.org/abs/2111.00775)以及[PP-OCRv2论文](https://arxiv.org/abs/2109.03144)。

在ImageNet1k公开数据集上,效果如下所示。

| 策略 | 骨干网络 | 配置文件 | Top-1 acc | 下载链接 |
| --- | --- | --- | --- | --- |
| baseline | PPLCNet_x2_5 | [PPLCNet_x2_5.yaml](../../../ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml) | 74.93% | - |
| UDML | PPLCNet_x2_5 | [PPLCNet_x2_5_udml.yaml](../../../ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml) | 76.74%(**+1.81%**) | - |

-
-### 3.2 大数据蒸馏

##### 1.2.3.2 UDML 配置

-基于 PaddleClas 的蒸馏策略为`大数据集训练 + imagenet1k finetune` 的策略。
-针对从 ImageNet22k 挑选出的 400 万数据,融合 imagenet1k 训练集,组成共 500 万的训练集进行训练,具体地,在不同模型上的训练超参及效果如下。

```yaml
Arch:
  name: "DistillationModel"
  class_num: &class_num 1000
  # if not null, its lengths should be same as models
  pretrained_list:
  # if not null, its lengths should be same as models
  freeze_params_list:
  - False
  - False
  models:
    - Teacher:
        name: PPLCNet_x2_5
        class_num: *class_num
        pretrained: False
        # return_patterns表示除了返回输出的logits,也会返回对应名称的中间层feature map
        return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"]
    - Student:
        name: PPLCNet_x2_5
        class_num: *class_num
        pretrained: False
        return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"]

# loss function config for training/eval process
Loss:
  Train:
    - DistillationGTCELoss:
        weight: 1.0
        key: logits
        model_names: ["Student", "Teacher"]
    - DistillationDMLLoss:
        weight: 1.0
        key: logits
        model_name_pairs:
        - ["Student", "Teacher"]
    - DistillationDistanceLoss: # 基于蒸馏结果的距离loss,这里默认使用l2 loss计算block5之间的损失函数
        weight: 1.0
        key: "blocks5"
        model_name_pairs:
        - ["Student", "Teacher"]
  Eval:
    - CELoss:
        weight: 1.0
```

**注意:** 上述在网络中指定`return_patterns`,返回中间层特征的功能是基于TheseusLayer,更多关于TheseusLayer的使用说明,请参考:[TheseusLayer 使用说明](./theseus_layer.md)。

-
-|Student Model | num_epoch | l2_ecay | batch size/gpu cards |  base lr | learning rate decay |  top1 acc |
-| - |:-: |:-: | :-: |:-: |:-: |:-: |
-| MobileNetV1 | 360 | 3e-5 | 4096/8 | 1.6 | cosine_decay_warmup | 77.65% |
-| MobileNetV2 | 360 | 1e-5 | 3072/8 | 0.54 | cosine_decay_warmup | 76.34% |
-| MobileNetV3_large_x1_0 | 360 | 1e-5 | 5760/24 | 3.65625 | cosine_decay_warmup | 78.54% |
-| MobileNetV3_small_x1_0 | 360 | 1e-5 | 5760/24 | 3.65625 | cosine_decay_warmup | 70.11% |
-| ResNet50_vd | 360 | 7e-5 | 1024/32 | 0.4 | cosine_decay_warmup | 82.07% |
-| ResNet101_vd | 360 | 7e-5 | 1024/32 | 0.4 | cosine_decay_warmup | 83.41% |
-| Res2Net200_vd_26w_4s | 360 | 4e-5 | 1024/32 | 0.4 | cosine_decay_warmup | 84.82% |

-
-### 3.3 ImageNet1k 训练集 finetune

#### 1.2.4 AFD

-对于在大数据集上训练的模型,其学习到的特征可能与 ImageNet1k 数据特征有偏,因此在这里使用 ImageNet1k 数据集对模型进行 finetune。 finetune 的超参和 finetune 的精度收益如下。

##### 1.2.4.1 AFD 算法介绍

论文信息:

-|Student Model | num_epoch | l2_ecay | batch size/gpu cards |  base lr | learning rate decay |  top1 acc |
-| - |:-: |:-: | :-: |:-: |:-: |:-: |
-| MobileNetV1 | 30 | 3e-5 | 4096/8 | 0.016 | cosine_decay_warmup | 77.89% |
-| MobileNetV2 | 30 | 1e-5 | 3072/8 | 0.0054 | cosine_decay_warmup | 76.73% |
-| MobileNetV3_large_x1_0 | 30 | 1e-5 | 2048/8 | 0.008 | cosine_decay_warmup | 78.96% |
-| MobileNetV3_small_x1_0 | 30 | 1e-5 | 6400/32 | 0.025 | cosine_decay_warmup | 71.28% |
-| ResNet50_vd | 60 | 7e-5 | 1024/32 | 0.004 | cosine_decay_warmup | 82.39% |
-| ResNet101_vd | 30 | 7e-5 | 1024/32 | 0.004 | cosine_decay_warmup | 83.73% |
-| Res2Net200_vd_26w_4s | 360 | 4e-5 | 1024/32 | 0.004 | cosine_decay_warmup | 85.13% |
-
-### 3.4 数据增广以及基于 Fix 策略的微调

> [Show, attend and distill: Knowledge distillation via attention-based feature matching](https://arxiv.org/abs/2102.02973)
>
> Mingi Ji, Byeongho Heo, Sungrae Park
>
> AAAI, 2021

-* 基于前文所述的实验结论,我们在训练的过程中加入自动增广(AutoAugment)[4],同时进一步减小了 l2_decay(4e-5->2e-5),最终 ResNet50_vd 经过 SSLD 蒸馏策略,在 ImageNet1k 上的精度可以达到 82.99%,相比之前不加数据增广的蒸馏策略再次增加了 0.6% 。

AFD提出在蒸馏的过程中,利用基于注意力的元网络学习特征之间的相对相似性,并应用识别的相似关系来控制所有可能的特征图pair的蒸馏强度。

在ImageNet1k公开数据集上,效果如下所示。

-* 对于图像分类任务,在测试的时候,测试尺度为训练尺度的 1.15 倍左右时,往往在不需要重新训练模型的情况下,模型的精度指标就可以进一步提升 [5],对于 82.99% 的 ResNet50_vd 在 320x320 的尺度下测试,精度可达 83.7%,我们进一步使用 Fix 策略,即在 320x320 的尺度下进行训练,使用与预测时相同的数据预处理方法,同时固定除 FC 层以外的所有参数,最终在 320x320 的预测尺度下,精度可以达到 **84.0%**。

| 策略 | 骨干网络 | 配置文件 | Top-1 acc | 下载链接 |
| --- | --- | --- | --- | --- |
| baseline | ResNet18 | [ResNet18.yaml](../../../ppcls/configs/ImageNet/ResNet/ResNet18.yaml) | 70.8% | - |
| AFD | ResNet18 | [resnet34_distill_resnet18_afd.yaml](../../../ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_afd.yaml) | 71.68%(**+0.88%**) | - |

-
-### 3.5 实验过程中的一些问题

注意:这里为了与论文的训练配置保持对齐,设置训练的迭代轮数为100epoch,因此baseline精度低于PaddleClas中开源出的模型精度(71.0%)。

-* 在预测过程中,batch norm 的平均值与方差是通过加载预训练模型得到(设其模式为 test mode)。在训练过程中,batch norm 是通过统计当前 batch 的信息(设其模式为 train mode),与历史保存信息进行滑动平均计算得到,在蒸馏任务中,我们发现通过 train mode,即教师模型的均值与方差实时变化的模式,去指导学生模型,比通过 test mode 蒸馏,得到的学生模型性能更好一些,下面是一组实验结果。因此我们在该蒸馏方案中,均使用 train mode 去得到教师模型的 soft label 。

##### 1.2.4.2 AFD 配置

-|Teacher Model | Teacher Top1 | Student Model | Student Top1|
-|- |:-: |:-: | :-: |
-| ResNet50_vd | 82.35% | MobileNetV3_large_x1_0 | 76.00% |
-| ResNet50_vd | 82.35% | MobileNetV3_large_x1_0 | 75.84% |
+AFD配置如下所示。在模型构建Arch字段中,需要同时定义学生模型与教师模型,固定教师模型的权重。这里需要对从教师模型获取的特征进行变换,进而与学生模型进行损失函数的计算。在损失函数Loss字段中,需要定义`DistillationKLDivLoss`(学生与教师之间的KL-Div loss)、`AFDLoss`(学生与教师之间的AFD loss)以及`DistillationGTCELoss`(学生与教师关于真值标签的CE loss),作为训练的损失函数。 - -## 4. 蒸馏模型的应用 +```yaml +Arch: + name: "DistillationModel" + pretrained_list: + freeze_params_list: + models: + - Teacher: + name: AttentionModel # 包含若干个串行的网络,后面的网络会将前面的网络输出作为输入并进行处理 + pretrained_list: + freeze_params_list: + - True + - False + models: + # AttentionModel 的基础网络 + - ResNet34: + name: ResNet34 + pretrained: True + # return_patterns表示除了返回输出的logits,也会返回对应名称的中间层feature map + return_patterns: &t_keys ["blocks[0]", "blocks[1]", "blocks[2]", "blocks[3]", + "blocks[4]", "blocks[5]", "blocks[6]", "blocks[7]", + "blocks[8]", "blocks[9]", "blocks[10]", "blocks[11]", + "blocks[12]", "blocks[13]", "blocks[14]", "blocks[15]"] + # AttentionModel的变换网络,会对基础子网络的特征进行变换 + - LinearTransformTeacher: + name: LinearTransformTeacher + qk_dim: 128 + keys: *t_keys + t_shapes: &t_shapes [[64, 56, 56], [64, 56, 56], [64, 56, 56], [128, 28, 28], + [128, 28, 28], [128, 28, 28], [128, 28, 28], [256, 14, 14], + [256, 14, 14], [256, 14, 14], [256, 14, 14], [256, 14, 14], + [256, 14, 14], [512, 7, 7], [512, 7, 7], [512, 7, 7]] - -### 4.1 使用方法 + - Student: + name: AttentionModel + pretrained_list: + freeze_params_list: + - False + - False + models: + - ResNet18: + name: ResNet18 + pretrained: False + return_patterns: &s_keys ["blocks[0]", "blocks[1]", "blocks[2]", "blocks[3]", + "blocks[4]", "blocks[5]", "blocks[6]", "blocks[7]"] + - LinearTransformStudent: + name: LinearTransformStudent + qk_dim: 128 + keys: *s_keys + s_shapes: &s_shapes [[64, 56, 56], [64, 56, 56], [128, 28, 28], [128, 28, 28], + [256, 14, 14], [256, 14, 14], [512, 7, 7], [512, 7, 7]] + t_shapes: *t_shapes -* 中间层学习率调整。蒸馏得到的模型的中间层特征图更加精细化,因此将蒸馏模型预训练应用到其他任务中时,如果采取和之前相同的学习率,容易破坏中间层特征。而如果降低整体模型训练的学习率,则会带来训练收敛速度慢的问题。因此我们使用了中间层学习率调整的策略。具体地: - * 针对 ResNet50_vd,我们设置一个学习率倍数列表,res block 之前的 3 个 conv2d 卷积参数具有统一的学习率倍数,4 个 res block 的 conv2d 分别有一个学习率参数,共需设置 5 个学习率倍数的超参。在实验中发现。用于迁移学习 finetune 分类模型时,`[0.1,0.1,0.2,0.2,0.3]` 的中间层学习率倍数设置在绝大多数的任务中都性能更好;而在目标检测任务中,`[0.05,0.05,0.05,0.1,0.15]` 的中间层学习率倍数设置能够带来更大的精度收益。 - * 对于 MoblileNetV3_large_x1_0,由于其包含 15 个 block,我们设置每 3 个 block 共享一个学习率倍数参数,因此需要共 5 个学习率倍数的参数,最终发现在分类和检测任务中,`[0.25,0.25,0.5,0.5,0.75]` 的中间层学习率倍数能够带来更大的精度收益。 + infer_model_name: "Student" -* 适当的 l2 decay 。不同分类模型在训练的时候一般都会根据模型设置不同的 l2 decay,大模型为了防止过拟合,往往会设置更大的 l2 decay,如 ResNet50 等模型,一般设置为 `1e-4` ;而如 MobileNet 系列模型,在训练时往往都会设置为 `1e-5~4e-5`,防止模型过度欠拟合,在蒸馏时亦是如此。在将蒸馏模型应用到目标检测任务中时,我们发现也需要调节 backbone 甚至特定任务模型模型的 l2 decay,和预训练蒸馏时的 l2 decay 尽可能保持一致。以 Faster RCNN MobiletNetV3 FPN 为例,我们发现仅修改该参数,在 COCO2017 数据集上就可以带来最多 0.5% 左右的精度(mAP)提升(默认 Faster RCNN l2 decay 为 1e-4,我们修改为 1e-5~4e-5 均有 0.3%~0.5% 的提升)。 +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + key: logits + - DistillationKLDivLoss: # 蒸馏的KL-Div loss,会根据model_name_pairs中的模型名称去提取对应模型的输出特征,计算loss + weight: 0.9 # 该loss的权重 + model_name_pairs: [["Student", "Teacher"]] + temperature: 4 + key: logits + - AFDLoss: # AFD loss + weight: 50.0 + model_name_pair: ["Student", "Teacher"] + student_keys: ["bilinear_key", "value"] + teacher_keys: ["query", "value"] + s_shapes: *s_shapes + t_shapes: *t_shapes + Eval: + - CELoss: + weight: 1.0 +``` - -### 4.2 迁移学习 finetune -* 为验证迁移学习的效果,我们在 10 个小的数据集上验证其效果。在这里为了保证实验的可对比性,我们均使用 ImageNet1k 数据集训练的标准预处理过程,对于蒸馏模型我们也添加了蒸馏模型中间层学习率的搜索。 -* 对于 ResNet50_vd, baseline 为 Top1 Acc 
79.12% 的预训练模型基于 grid search 搜索得到的最佳精度,对比实验则为基于该精度对预训练和中间层学习率进一步搜索得到的最佳精度。下面给出 10 个数据集上所有 baseline 和蒸馏模型的精度对比。

**注意:** 上述在网络中指定`return_patterns`,返回中间层特征的功能是基于TheseusLayer,更多关于TheseusLayer的使用说明,请参考:[TheseusLayer 使用说明](./theseus_layer.md)。

-| Dataset | Model | Baseline Top1 Acc | Distillation Model Finetune |
-|- |:-: |:-: | :-: |
-| Oxford102 flowers | ResNete50_vd | 97.18% | 97.41% |
-| caltech-101 | ResNete50_vd | 92.57% | 93.21% |
-| Oxford-IIIT-Pets | ResNete50_vd | 94.30% | 94.76% |
-| DTD | ResNete50_vd | 76.48% | 77.71% |
-| fgvc-aircraft-2013b | ResNete50_vd | 88.98% | 90.00% |
-| Stanford-Cars | ResNete50_vd | 92.65% | 92.76% |
-| SUN397 | ResNete50_vd | 64.02% | 68.36% |
-| cifar100 | ResNete50_vd | 86.50% | 87.58% |
-| cifar10 | ResNete50_vd | 97.72% | 97.94% |
-| Food-101 | ResNete50_vd | 89.58% | 89.99% |

#### 1.2.5 DKD

-* 可以看出在上面 10 个数据集上,结合适当的中间层学习率倍数设置,蒸馏模型平均能够带来 1% 以上的精度提升。

##### 1.2.5.1 DKD 算法介绍

-
-### 4.3 目标检测

论文信息:

-我们基于两阶段目标检测 Faster/Cascade RCNN 模型验证蒸馏得到的预训练模型的效果。
-* ResNet50_vd

> [Decoupled Knowledge Distillation](https://arxiv.org/abs/2203.08679)
>
> Borui Zhao, Quan Cui, Renjie Song, Yiyu Qiu, Jiajun Liang
>
> CVPR, 2022

-设置训练与评测的尺度均为 640x640,最终 COCO 上检测指标如下。

DKD将蒸馏中常用的 KD Loss 进行了解耦成为Target Class Knowledge Distillation(TCKD,目标类知识蒸馏)以及Non-target Class Knowledge Distillation(NCKD,非目标类知识蒸馏)两个部分,对两个部分的作用分别研究,并使它们各自的权重可以独立调节,提升了蒸馏的精度和灵活性。

-| Model | train/test scale | pretrain top1 acc | feature map lr | coco mAP |
-|- |:-: |:-: | :-: | :-: |
-| Faster RCNN R50_vd FPN | 640/640 | 79.12% | [1.0,1.0,1.0,1.0,1.0] | 34.8% |
-| Faster RCNN R50_vd FPN | 640/640 | 79.12% | [0.05,0.05,0.1,0.1,0.15] | 34.3% |
-| Faster RCNN R50_vd FPN | 640/640 | 82.18% | [0.05,0.05,0.1,0.1,0.15] | 36.3% |

在ImageNet1k公开数据集上,效果如下所示。

-在这里可以看出,对于未蒸馏模型,过度调整中间层学习率反而降低最终检测模型的性能指标。基于该蒸馏模型,我们也提供了领先的服务端实用目标检测方案,详细的配置与训练代码均已开源,可以参考 [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection/tree/master/configs/rcnn_enhance)。

| 策略 | 骨干网络 | 配置文件 | Top-1 acc | 下载链接 |
| --- | --- | --- | --- | --- |
| baseline | ResNet18 | [ResNet18.yaml](../../../ppcls/configs/ImageNet/ResNet/ResNet18.yaml) | 70.8% | - |
| DKD | ResNet18 | [resnet34_distill_resnet18_dkd.yaml](../../../ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml) | 72.59%(**+1.79%**) | - |

-
-## 5. 
SSLD 实战 -本节将基于 ImageNet-1K 的数据集详细介绍 SSLD 蒸馏实验,如果想快速体验此方法,可以参考 [**30 分钟玩转 PaddleClas(进阶版)**](../quick_start/quick_start_classification_professional.md)中基于 CIFAR100 的 SSLD 蒸馏实验。 +##### 1.2.5.2 DKD 配置 - -### 5.1 参数配置 +DKD 配置如下所示。在模型构建Arch字段中,需要同时定义学生模型与教师模型,教师模型固定参数,且需要加载预训练模型。在损失函数Loss字段中,需要定义`DistillationDKDLoss`(学生与教师之间的DKD loss)以及`DistillationGTCELoss`(学生与教师关于真值标签的CE loss),作为训练的损失函数。 -实战部分提供了 SSLD 蒸馏的示例,在 `ppcls/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml` 中提供了 `MobileNetV3_large_x1_0` 蒸馏 `MobileNetV3_small_x1_0` 的配置文件,用户可以在 `tools/train.sh` 里直接替换配置文件的路径即可使用。 ```yaml Arch: @@ -216,53 +419,233 @@ Arch: - False models: - Teacher: - name: MobileNetV3_large_x1_0 + name: ResNet34 pretrained: True - use_ssld: True + - Student: - name: MobileNetV3_small_x1_0 + name: ResNet18 pretrained: False infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationDKDLoss: + weight: 1.0 + model_name_pairs: [["Student", "Teacher"]] + temperature: 1 + alpha: 1.0 + beta: 1.0 + Eval: + - CELoss: + weight: 1.0 ``` -在参数配置中,`freeze_params_list` 中需要指定模型是否需要冻结参数,`models` 中需要指定 Teacher 模型和 Student 模型,其中 Teacher 模型需要加载预训练模型。用户可以直接在此处更改模型。 + - -### 5.2 启动命令 +#### 1.2.6 DIST -当用户配置完训练环境后,类似于训练其他分类任务,只需要将 `tools/train.sh` 中的配置文件替换成为相应的蒸馏配置文件即可。 +##### 1.2.6.1 DIST 算法介绍 -其中 `train.sh` 中的内容如下: +论文信息: -```bash -python -m paddle.distributed.launch \ - --selected_gpus="0,1,2,3" \ - --log_dir=mv3_large_x1_0_distill_mv3_small_x1_0 \ +> [Knowledge Distillation from A Stronger Teacher](https://arxiv.org/pdf/2205.10536v1.pdf) +> +> Tao Huang, Shan You, Fei Wang, Chen Qian, Chang Xu +> +> 2022, under review + +使用KD方法进行模型蒸馏时,教师模型精度提升时,蒸馏的效果往往难以同步提升。本文提出DIST方法,使用皮尔逊相关系数(Pearson correlation coefficient)去表征学生模型与教师模型之间的差异,替代蒸馏过程中默认的KL散度,从而保证模型可以学到更加准确的相关性信息。 + +在ImageNet1k公开数据集上,效果如下所示。 + +| 策略 | 骨干网络 | 配置文件 | Top-1 acc | 下载链接 | +| --- | --- | --- | --- | --- | +| baseline | ResNet18 | [ResNet18.yaml](../../../ppcls/configs/ImageNet/ResNet/ResNet18.yaml) | 70.8% | - | +| DIST | ResNet18 | [resnet34_distill_resnet18_dist.yaml](../../../ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml) | 71.99%(**+1.19%**) | - | + + +##### 1.2.6.2 DIST 配置 + +DIST 配置如下所示。在模型构建Arch字段中,需要同时定义学生模型与教师模型,教师模型固定参数,且需要加载预训练模型。在损失函数Loss字段中,需要定义`DistillationDISTLoss`(学生与教师之间的DIST loss)以及`DistillationGTCELoss`(学生与教师关于真值标签的CE loss),作为训练的损失函数。 + + +```yaml +Arch: + name: "DistillationModel" + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet34 + pretrained: True + + - Student: + name: ResNet18 + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationDISTLoss: + weight: 2.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 +``` + + + + +## 2. 
模型训练、评估和预测


### 2.1 环境配置

* 安装:请先参考 [Paddle 安装教程](../installation/install_paddle.md) 以及 [PaddleClas 安装教程](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。


### 2.2 数据准备

请在[ImageNet 官网](https://www.image-net.org/)准备 ImageNet-1k 相关的数据。

进入 PaddleClas 目录。

```
cd path_to_PaddleClas
```

进入 `dataset/` 目录,将下载好的数据命名为 `ILSVRC2012` ,存放于此。 `ILSVRC2012` 目录中具有以下数据:

```
├── train
│   ├── n01440764
│   │   ├── n01440764_10026.JPEG
│   │   ├── n01440764_10027.JPEG
├── train_list.txt
...
├── val
│   ├── ILSVRC2012_val_00000001.JPEG
│   ├── ILSVRC2012_val_00000002.JPEG
├── val_list.txt
```

其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件。

如果包含与训练集场景相似的无标注数据,则也可以按照与训练集标注完全相同的方式进行整理,将文件与当前有标注的数据集放在相同目录下,将其标签值记为0,假设整理的标签文件名为`train_list_unlabel.txt`(其生成方式可参考本节末尾的示意脚本),则可以通过下面的命令生成用于SSLD训练的标签文件。

```shell
cat train_list.txt train_list_unlabel.txt > train_list_all.txt
```

**备注:**

* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。
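`train_list_unlabel.txt` 可以用任意方式生成,下面给出一个简单的 Python 示意脚本(其中无标注图像的目录名为示例假设,并非 PaddleClas 的固定约定,请按实际情况修改):

```python
import os

# 示例路径:假设无标注图像与有标注数据放在相同的数据集根目录下
unlabel_dir = "dataset/ILSVRC2012/train_unlabel"

with open("dataset/ILSVRC2012/train_list_unlabel.txt", "w") as f:
    for name in sorted(os.listdir(unlabel_dir)):
        if name.lower().endswith((".jpg", ".jpeg", ".png")):
            # 无标注数据的标签统一记为 0,仅用于统一数据读取格式
            f.write("train_unlabel/%s 0\n" % name)
```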
### 2.3 模型训练

以SSLD知识蒸馏算法为例,介绍知识蒸馏算法的模型训练、评估、预测等过程。配置文件为 [PPLCNet_x2_5_ssld.yaml](../../../ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml) ,使用下面的命令可以完成模型训练。

```shell
export CUDA_VISIBLE_DEVICES=0,1,2,3
python3 -m paddle.distributed.launch \
    --gpus="0,1,2,3" \
    tools/train.py \
-    -c ./ppcls/configs/ImageNet/Distillation/mv3_large_x1_0_distill_mv3_small_x1_0.yaml
    -c ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml
```

-运行 `train.sh` :


### 2.4 模型评估

训练好模型之后,可以通过以下命令实现对模型指标的评估。

```bash
-sh tools/train.sh
python3 tools/eval.py \
    -c ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml \
    -o Global.pretrained_model=output/DistillationModel/best_model
```

-
-### 5.3 注意事项

其中 `-o Global.pretrained_model="output/DistillationModel/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。

-* 用户在使用 SSLD 蒸馏之前,首先需要在目标数据集上训练一个教师模型,该教师模型用于指导学生模型在该数据集上的训练。

### 2.5 模型预测

-* 如果学生模型没有加载预训练模型,训练的其他超参数可以参考该学生模型在 ImageNet-1k 上训练的超参数,如果学生模型加载了预训练模型,学习率可以调整到原来的 1/10 或者 1/100 。

模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测:

```shell
python3 tools/infer.py \
    -c ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml \
    -o Global.pretrained_model=output/DistillationModel/best_model
```

输出结果如下:

```
[{'class_ids': [8, 7, 86, 82, 21], 'scores': [0.87908, 0.12091, 0.0, 0.0, 0.0], 'file_name': 'docs/images/inference_deployment/whl_demo.jpg', 'label_names': ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'kite']}]
```

-* 在 SSLD 蒸馏的过程中,学生模型只学习 soft-label 导致训练目标变的更加复杂,建议可以适当的调小 `l2_decay` 的值来获得更高的验证集准确率。
-* 若用户准备添加无标签的训练数据,只需要将新的训练数据放置在原本训练数据的路径下,生成新的数据 list 即可,另外,新生成的数据 list 需要将无标签的数据添加伪标签(只是为了统一读数据)。

**备注:**

-
-## 6. 参考文献

* 这里`-o Global.pretrained_model="output/DistillationModel/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。

* 默认是对 `docs/images/inference_deployment/whl_demo.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。


### 2.6 模型导出与推理

Paddle Inference 是飞桨的原生推理库,作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。

在模型推理之前需要先导出模型。对于知识蒸馏训练得到的模型,在导出时需要指定`-o Arch.infer_model_name=Student`,来表示导出的模型为学生模型。具体命令如下所示。

```shell
python3 tools/export_model.py \
    -c ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml \
    -o Global.pretrained_model=./output/DistillationModel/best_model \
    -o Arch.infer_model_name=Student
```

最终在`inference`目录下会产生`inference.pdiparams`、`inference.pdiparams.info`、`inference.pdmodel` 3个文件。导出后模型的加载方式可参考下面的示意代码。

关于更多模型推理相关的教程,请参考:[Python 预测推理](../inference_deployment/python_deploy.md)。
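如果只是想快速验证导出的 inference 模型能否正常前向,可以参考下面基于 `paddle.inference` 的简化示例(仅为示意:这里用随机数据代替真实图像,实际使用时预处理需与训练配置保持一致,完整实现请以上面链接的官方教程为准):

```python
import numpy as np
from paddle.inference import Config, create_predictor

# 加载上文导出命令产生的学生模型
config = Config("inference/inference.pdmodel", "inference/inference.pdiparams")
config.disable_gpu()  # 如需 GPU:config.enable_use_gpu(200, 0)
predictor = create_predictor(config)

# 构造一个符合输入尺寸的随机张量做一次前向(示意用,
# 实际应替换为 resize/crop/normalize 之后的真实图像数据)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(np.random.rand(1, 3, 224, 224).astype("float32"))
predictor.run()

output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
print(output_handle.copy_to_cpu().shape)  # 期望输出形状为 (1, 1000)
```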
## 3. 参考文献

[1] Hinton G, Vinyals O, Dean J. Distilling the knowledge in a neural network[J]. arXiv preprint arXiv:1503.02531, 2015.

@@ -273,3 +656,19 @@

[4] Cubuk E D, Zoph B, Mane D, et al. Autoaugment: Learning augmentation strategies from data[C]//Proceedings of the IEEE conference on computer vision and pattern recognition. 2019: 113-123.

[5] Touvron H, Vedaldi A, Douze M, et al. Fixing the train-test resolution discrepancy[C]//Advances in Neural Information Processing Systems. 2019: 8250-8260.

[6] Cui C, Guo R, Du Y, et al. Beyond Self-Supervision: A Simple Yet Effective Network Distillation Alternative to Improve Backbones[J]. arXiv preprint arXiv:2103.05959, 2021.

[7] Zhang Y, Xiang T, Hospedales T M, et al. Deep mutual learning[C]//Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2018: 4320-4328.

[8] Heo B, Kim J, Yun S, et al. A comprehensive overhaul of feature distillation[C]//Proceedings of the IEEE/CVF International Conference on Computer Vision. 2019: 1921-1930.

[9] Du Y, Li C, Guo R, et al. PP-OCRv2: Bag of Tricks for Ultra Lightweight OCR System[J]. arXiv preprint arXiv:2109.03144, 2021.

[10] Park W, Kim D, Lu Y, et al. Relational knowledge distillation[C]//Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019: 3967-3976.

[11] Zhao B, Cui Q, Song R, et al. Decoupled Knowledge Distillation[J]. arXiv preprint arXiv:2203.08679, 2022.

[12] Ji M, Heo B, Park S. Show, attend and distill: Knowledge distillation via attention-based feature matching[C]//Proceedings of the AAAI Conference on Artificial Intelligence. 2021, 35(9): 7945-7952.

[13] Huang T, You S, Wang F, et al. Knowledge Distillation from A Stronger Teacher[J]. arXiv preprint arXiv:2205.10536, 2022.

diff --git a/docs/zh_CN/advanced_tutorials/ssld.md b/docs/zh_CN/advanced_tutorials/ssld.md
new file mode 100644
index 0000000000000000000000000000000000000000..e19a98cbc866bc02f0ca9df6d8e939b3342663f5
--- /dev/null
+++ b/docs/zh_CN/advanced_tutorials/ssld.md
@@ -0,0 +1,171 @@

# SSLD 知识蒸馏实战

## 目录

- [1. 算法介绍](#1)
  - [1.1 知识蒸馏简介](#1.1)
  - [1.2 SSLD蒸馏策略](#1.2)
  - [1.3 SKL-UGI蒸馏策略](#1.3)
- [2. SSLD预训练模型库](#2)
- [3. SSLD使用](#3)
  - [3.1 加载SSLD模型进行微调](#3.1)
  - [3.2 使用SSLD方案进行知识蒸馏](#3.2)
- [4. 参考文献](#4)


## 1. 算法介绍


### 1.1 知识蒸馏简介

PaddleClas 融合已有的知识蒸馏方法 [2,3],提供了一种简单的半监督标签知识蒸馏方案(SSLD,Simple Semi-supervised Label Distillation),基于 ImageNet1k 分类数据集,在 ResNet_vd 以及 MobileNet 系列上的精度均有超过 3% 的绝对精度提升,具体指标如下图所示。

<div align="center">
+ +
+ + + +### 1.2 SSLD蒸馏策略 + +SSLD 的流程图如下图所示。 + +
+ +
首先,我们从 ImageNet22k 中挖掘出了近 400 万张图片,同时与 ImageNet-1k 训练集整合在一起,得到了一个新的包含 500 万张图片的数据集。然后,我们将学生模型与教师模型组合成一个新的网络,该网络分别输出学生模型和教师模型的预测分布,与此同时,固定教师模型整个网络的梯度,而学生模型可以做正常的反向传播。最后,我们将两个模型的 logits 经过 softmax 激活函数转换为 soft label,并将二者的 soft label 做 JS 散度作为损失函数,用于蒸馏模型训练(该损失的简化示意见下面的代码)。
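为便于理解,下面用 numpy 给出 soft label 之间 JS 散度损失的一个最小示意。注意,实际训练中该计算由配置文件里的 `DistillationDMLLoss` 完成,且 SSLD 对 JS 散度做了进一步改进,此处仅为概念演示,示例中的 logits 为随机数据:

```python
import numpy as np

def softmax(x):
    x = x - x.max(axis=-1, keepdims=True)  # 数值稳定
    e = np.exp(x)
    return e / e.sum(axis=-1, keepdims=True)

def js_divergence(p, q, eps=1e-12):
    # JS(P||Q) = 0.5*KL(P||M) + 0.5*KL(Q||M),其中 M = (P+Q)/2
    m = 0.5 * (p + q)
    kl_pm = (p * np.log((p + eps) / (m + eps))).sum(axis=-1)
    kl_qm = (q * np.log((q + eps) / (m + eps))).sum(axis=-1)
    return 0.5 * kl_pm + 0.5 * kl_qm

rng = np.random.default_rng(0)
student_logits = rng.normal(size=(4, 1000))  # 学生模型输出,正常反向传播
teacher_logits = rng.normal(size=(4, 1000))  # 教师模型输出,训练中固定梯度

loss = js_divergence(softmax(student_logits), softmax(teacher_logits)).mean()
print(loss)
```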
以 MobileNetV3(该模型直接训练,精度为 75.3%)的知识蒸馏为例,该方案的核心策略优化点如下所示。

| 实验ID | 策略 | Top-1 acc |
|:------:|:---------:|:--------:|
| 1 | baseline | 75.60% |
| 2 | 更换教师模型精度为82.4%的权重 | 76.00% |
| 3 | 使用改进的JS散度损失函数 | 76.20% |
| 4 | 迭代轮数增加至360epoch | 77.10% |
| 5 | 添加400W挖掘得到的无标注数据 | 78.50% |
| 6 | 基于ImageNet1k数据微调 | 78.90% |

* 注:其中baseline的训练条件为
  * 训练数据:ImageNet1k数据集
  * 损失函数:Cross Entropy Loss
  * 迭代轮数:120epoch

SSLD 蒸馏方案的一大特色就是无需使用图像的真值标签,因此可以任意扩展数据集的大小,考虑到计算资源的限制,我们在这里仅基于 ImageNet22k 数据集对蒸馏任务的训练集进行扩充。在 SSLD 蒸馏任务中,我们使用了 `Top-k per class` 的数据采样方案 [3] 。具体步骤如下。

(1)训练集去重。我们首先基于 SIFT 特征相似度匹配的方式对 ImageNet22k 数据集与 ImageNet1k 验证集进行去重,防止添加的 ImageNet22k 训练集中包含 ImageNet1k 验证集图像,最终去除了 4511 张相似图片。部分过滤的相似图片如下所示。

<div align="center">

</div>
+ +(2)大数据集 soft label 获取,对于去重后的 ImageNet22k 数据集,我们使用 `ResNeXt101_32x16d_wsl` 模型进行预测,得到每张图片的 soft label 。 + +(3)Top-k 数据选择,ImageNet1k 数据共有 1000 类,对于每一类,找出属于该类并且得分最高的 `k` 张图片,最终得到一个数据量不超过 `1000*k` 的数据集(某些类上得到的图片数量可能少于 `k` 张)。 + +(4)将该数据集与 ImageNet1k 的训练集融合组成最终蒸馏模型所使用的数据集,数据量为 500 万。 + + + + +## 1.3 SKL-UGI蒸馏策略 + +此外,在无标注数据选择的过程中,我们发现使用更加通用的数据,即使不需要严格的数据筛选过程,也可以帮助知识蒸馏任务获得稳定的精度提升,因而提出了SKL-UGI (Symmetrical-KL Unlabeled General Images distillation)知识蒸馏方案。 + +通用数据可以使用ImageNet数据或者与场景相似的数据集。更多关于SKL-UGI的应用,请参考:[超轻量图像分类方案PULC使用教程](../PULC/PULC_train.md)。 + + + + +## 2. 预训练模型库 + + +移动端预训练模型库列表如下所示。 + +| 模型 | FLOPs(M) | Params(M) | top-1 acc | SSLD top-1 acc | 精度收益 | 下载链接 | +|-------------------|----------|-----------|----------|---------------|--------|------| +| PPLCNetV2_base | 604.16 | 6.54 | 77.04% | 80.10% | +3.06% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_ssld_pretrained.pdparams) | +| PPLCNet_x2_5 | 906.49 | 9.04 | 76.60% | 80.82% | +4.22% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_ssld_pretrained.pdparams) | +| PPLCNet_x1_0 | 160.81 | 2.96 | 71.32% | 74.39% | +3.07% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_ssld_pretrained.pdparams) | +| PPLCNet_x0_5 | 47.28 | 1.89 | 63.14% | 66.10% | +2.96% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_ssld_pretrained.pdparams) | +| PPLCNet_x0_25 | 18.43 | 1.52 | 51.86% | 53.43% | +1.57% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_ssld_pretrained.pdparams) | +| MobileNetV1 | 578.88 | 4.19 | 71.00% | 77.90% | +6.90% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_ssld_pretrained.pdparams) | +| MobileNetV2 | 327.84 | 3.44 | 72.20% | 76.74% | +4.54% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_ssld_pretrained.pdparams) | +| MobileNetV3_large_x1_0 | 229.66 | 5.47 | 75.30% | 79.00% | +3.70% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_ssld_pretrained.pdparams) | +| MobileNetV3_small_x1_0 | 63.67 | 2.94 | 68.20% | 71.30% | +3.10% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_ssld_pretrained.pdparams) | +| MobileNetV3_small_x0_35 | 14.56 | 1.66 | 53.00% | 55.60% | +2.60% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_ssld_pretrained.pdparams) | +| GhostNet_x1_3_ssld | 236.89 | 7.30 | 75.70% | 79.40% | +3.70% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_ssld_pretrained.pdparams) | + +* 注:其中的`top-1 acc`表示使用普通训练方式得到的模型精度,`SSLD top-1 acc`表示使用SSLD知识蒸馏训练策略得到的模型精度。 + + +服务端预训练模型库列表如下所示。 + +| 模型 | FLOPs(G) | Params(M) | top-1 acc | SSLD top-1 acc | 精度收益 | 下载链接 | +|----------------------|----------|-----------|----------|---------------|--------|-------------------------------------------------------------------------------------------| +| PPHGNet_base | 25.14 | 71.62 | - | 85.00% | - | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_base_ssld_pretrained.pdparams) | +| PPHGNet_small | 8.53 | 24.38 | 81.50% | 83.80% | +2.30% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_ssld_pretrained.pdparams) | +| PPHGNet_tiny | 4.54 | 14.75 | 79.83% | 81.95% | 
+2.12% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_ssld_pretrained.pdparams) | +| ResNet50_vd | 8.67 | 25.58 | 79.10% | 83.00% | +3.90% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams) | +| ResNet101_vd | 16.1 | 44.57 | 80.20% | 83.70% | +3.50% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_ssld_pretrained.pdparams) | +| ResNet34_vd | 7.39 | 21.82 | 76.00% | 79.70% | +3.70% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet34_vd_ssld_pretrained.pdparams) | +| Res2Net50_vd_26w_4s | 8.37 | 25.06 | 79.80% | 83.10% | +3.30% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_vd_26w_4s_ssld_pretrained.pdparams) | +| Res2Net101_vd_26w_4s | 16.67 | 45.22 | 80.60% | 83.90% | +3.30% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net101_vd_26w_4s_ssld_pretrained.pdparams) | +| Res2Net200_vd_26w_4s | 31.49 | 76.21 | 81.20% | 85.10% | +3.90% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_ssld_pretrained.pdparams) | +| HRNet_W18_C | 4.14 | 21.29 | 76.90% | 81.60% | +4.70% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/_ssld_pretrained.pdparams) | +| HRNet_W48_C | 34.58 | 77.47 | 79.00% | 83.60% | +4.60% | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_ssld_pretrained.pdparams) | +| SE_HRNet_W64_C | 57.83 | 128.97 | - | 84.70% | - | [链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SE_HRNet_W64_C_ssld_pretrained.pdparams) | + + + + +## 3. SSLD使用方法 + + + +### 3.1 加载SSLD模型进行微调 + +如果希望直接使用预训练模型,可以在训练的时候,加入参数`-o Arch.pretrained=True -o Arch.use_ssld=True`,表示使用基于SSLD的预训练模型,示例如下所示。 + +```shell +# 单机单卡训练 +python3 tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Arch.pretrained=True -o Arch.use_ssld=True +# 单机多卡训练 +python3 -m paddle.distributed.launch --gpus="0,1,2,3" tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Arch.pretrained=True -o Arch.use_ssld=True +``` + + + +### 3.2 使用SSLD方案进行知识蒸馏 + +相比于其他大多数知识蒸馏算法,SSLD摆脱对数据标注的依赖,通过引入无标注数据,可以进一步提升模型精度。 + +对于无标注数据,需要按照与有标注数据完全相同的整理方式,将文件与当前有标注的数据集放在相同目录下,将其标签值记为`0`,假设整理的标签文件名为`train_list_unlabel.txt`,则可以通过下面的命令生成用于SSLD训练的标签文件。 + +```shell +cat train_list.txt train_list_unlabel.txt > train_list_all.txt +``` + +更多关于图像分类任务的数据标签说明,请参考:[PaddleClas图像分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) + +PaddleClas中集成了PULC超轻量图像分类实用方案,里面包含SSLD ImageNet预训练模型的使用以及更加通用的无标签数据的知识蒸馏方案,更多详细信息,请参考[PULC超轻量图像分类实用方案使用教程](../PULC/PULC_train.md)。 + + + +## 4. 参考文献 + +[1] Hinton G, Vinyals O, Dean J. Distilling the knowledge in a neural network[J]. arXiv preprint arXiv:1503.02531, 2015. + +[2] Bagherinezhad H, Horton M, Rastegari M, et al. Label refinery: Improving imagenet classification through label progression[J]. arXiv preprint arXiv:1805.02641, 2018. + +[3] Yalniz I Z, Jégou H, Chen K, et al. Billion-scale semi-supervised learning for image classification[J]. arXiv preprint arXiv:1905.00546, 2019. + +[4] Touvron H, Vedaldi A, Douze M, et al. Fixing the train-test resolution discrepancy[C]//Advances in Neural Information Processing Systems. 2019: 8250-8260. 
diff --git a/docs/zh_CN/algorithm_introduction/ImageNet_models.md b/docs/zh_CN/algorithm_introduction/ImageNet_models.md index ee98de442a40fb7c37b2274b756a728f7dcfc5af..73ac0e8534c1370f7ff6e0cc9ceaaeac6b364ed6 100644 --- a/docs/zh_CN/algorithm_introduction/ImageNet_models.md +++ b/docs/zh_CN/algorithm_introduction/ImageNet_models.md @@ -5,40 +5,41 @@ ## 目录 -- [1. 模型库概览图](#1) -- [2. SSLD 知识蒸馏预训练模型](#2) - - [2.1 服务器端知识蒸馏模型](#2.1) - - [2.2 移动端知识蒸馏模型](#2.2) - - [2.3 Intel CPU 端知识蒸馏模型](#2.3) -- [3. PP-LCNet 系列](#3) -- [4. ResNet 系列](#4) -- [5. 移动端系列](#5) -- [6. SEResNeXt 与 Res2Net 系列](#6) -- [7. DPN 与 DenseNet 系列](#7) -- [8. HRNet 系列](#8) -- [9. Inception 系列](#9) -- [10. EfficientNet 与 ResNeXt101_wsl 系列](#10) -- [11. ResNeSt 与 RegNet 系列](#11) -- [12. ViT_and_DeiT 系列](#12) -- [13. RepVGG 系列](#13) -- [14. MixNet 系列](#14) -- [15. ReXNet 系列](#15) -- [16. SwinTransformer 系列](#16) -- [17. LeViT 系列](#17) -- [18. Twins 系列](#18) -- [19. HarDNet 系列](#19) -- [20. DLA 系列](#20) -- [21. RedNet 系列](#21) -- [22. TNT 系列](#22) -- [23. CSwinTransformer 系列](#23) -- [24. PVTV2 系列](#24) -- [25. MobileViT 系列](#25) -- [26. 其他模型](#26) +- [模型库概览图](#Overview) +- [SSLD 知识蒸馏预训练模型](#SSLD) + - [服务器端知识蒸馏模型](#SSLD_server) + - [移动端知识蒸馏模型](#SSLD_mobile) + - [Intel CPU 端知识蒸馏模型](#SSLD_intel_cpu) +- [PP-LCNet & PP-LCNetV2 系列](#PPLCNet) +- [PP-HGNet 系列](#PPHGNet) +- [ResNet 系列](#ResNet) +- [移动端系列](#Mobile) +- [SEResNeXt 与 Res2Net 系列](#SEResNeXt_Res2Net) +- [DPN 与 DenseNet 系列](#DPN&DenseNet) +- [HRNet 系列](#HRNet) +- [Inception 系列](#Inception) +- [EfficientNet 与 ResNeXt101_wsl 系列](#EfficientNetRes&NeXt101_wsl) +- [ResNeSt 与 RegNet 系列](#ResNeSt&RegNet) +- [ViT_and_DeiT 系列](#ViT&DeiT) +- [RepVGG 系列](#RepVGG) +- [MixNet 系列](#MixNet) +- [ReXNet 系列](#ReXNet) +- [SwinTransformer 系列](#SwinTransformer) +- [LeViT 系列](#LeViT) +- [Twins 系列](#Twins) +- [HarDNet 系列](#HarDNet) +- [DLA 系列](#DLA) +- [RedNet 系列](#RedNet) +- [TNT 系列](#TNT) +- [CSwinTransformer 系列](#CSwinTransformer) +- [PVTV2 系列](#PVTV2) +- [MobileViT 系列](#MobileViT) +- [其他模型](#Others) - [参考文献](#reference) - + -## 1. 模型库概览图 +## 模型库概览图 基于 ImageNet1k 分类数据集,PaddleClas 支持 37 个系列分类网络结构以及对应的 217 个图像分类预训练模型,训练技巧、每个系列网络结构的简单介绍和性能评估将在相应章节展现,下面所有的速度指标评估环境如下: * Arm CPU 的评估环境基于骁龙 855(SD855)。 @@ -58,14 +59,14 @@ ![](../../images/models/V100_benchmark/v100.fp32.bs1.visiontransformer.png) - + -## 2. SSLD 知识蒸馏预训练模型 +## SSLD 知识蒸馏预训练模型 基于 SSLD 知识蒸馏的预训练模型列表如下所示,更多关于 SSLD 知识蒸馏方案的介绍可以参考:[SSLD 知识蒸馏文档](./knowledge_distillation.md)。 - + -### 2.1 服务器端知识蒸馏模型 +### 服务器端知识蒸馏模型 | 模型 | Top-1 Acc | Reference
Top-1 Acc | Acc gain | time(ms)
bs=1 | time(ms)
bs=4 | time(ms)
bs=8 | FLOPs(G) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | |---------------------|-----------|-----------|---------------|----------------|-----------|----------|-----------|-----------------------------------|-----------------------------------|-----------------------------------| @@ -78,10 +79,12 @@ | HRNet_W18_C_ssld | 0.812 | 0.769 | 0.043 | 6.66 | 8.94 | 11.95 | 4.32 | 21.35 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W18_C_ssld_infer.tar) | | HRNet_W48_C_ssld | 0.836 | 0.790 | 0.046 | 11.07 | 17.06 | 27.28 | 17.34 | 77.57 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W48_C_ssld_infer.tar) | | SE_HRNet_W64_C_ssld | 0.848 | - | - | 17.11 | 26.87 | 43.24 | 29.00 | 129.12 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SE_HRNet_W64_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_HRNet_W64_C_ssld_infer.tar) | +| PPHGNet_tiny_ssld | 0.8195 | 0.7983 | 0.021 | 1.77 | - | - | 4.54 | 14.75 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_ssld_infer.tar) | +| PPHGNet_small_ssld | 0.8382 | 0.8151 | 0.023 | 2.52 | - | - | 8.53 | 24.38 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_ssld_infer.tar) | - + -### 2.2 移动端知识蒸馏模型 +### 移动端知识蒸馏模型 | 模型 | Top-1 Acc | Reference
Top-1 Acc | Acc gain | SD855 time(ms)
bs=1, thread=1 | SD855 time(ms)
bs=1, thread=2 | SD855 time(ms)
bs=1, thread=4 | FLOPs(M) | Params(M) | 模型大小(M) | 预训练模型下载地址 | inference模型下载地址 | |---------------------|-----------|-----------|---------------|----------------|-----------|----------|-----------|-----------------------------------|-----------------------------------|-----------------------------------|-----------------------------------| @@ -92,9 +95,9 @@ | MobileNetV3_small_x1_0_ssld | 0.713 | 0.682 | 0.031 | 5.63 | 3.65 | 2.60 | 63.67 | 2.95 | 12 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_small_x1_0_ssld_infer.tar) | | GhostNet_x1_3_ssld | 0.794 | 0.757 | 0.037 | 19.16 | 12.25 | 9.40 | 236.89 | 7.38 | 29 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/GhostNet_x1_3_ssld_infer.tar) | - + -### 2.3 Intel CPU 端知识蒸馏模型 +### Intel CPU 端知识蒸馏模型 | 模型 | Top-1 Acc | Reference
Top-1 Acc | Acc gain | Intel-Xeon-Gold-6148 time(ms)
bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | |---------------------|-----------|-----------|---------------|----------------|----------|-----------|-----------------------------------|-----------------------------------| @@ -104,26 +107,49 @@ * 注: `Reference Top-1 Acc` 表示 PaddleClas 基于 ImageNet1k 数据集训练得到的预训练模型精度。 - + -## 3. PP-LCNet 系列 [[28](#ref28)] +## PP-LCNet & PP-LCNetV2 系列 [[28](#ref28)] -PP-LCNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[PP-LCNet 系列模型文档](../models/PP-LCNet.md)。 +PP-LCNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[PP-LCNet 系列模型文档](../models/PP-LCNet.md),[PP-LCNetV2 系列模型文档](../models/PP-LCNetV2.md)。 -| 模型 | Top-1 Acc | Top-5 Acc | Intel-Xeon-Gold-6148 time(ms)
bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | +| 模型 | Top-1 Acc | Top-5 Acc | time(ms)*
bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | |:--:|:--:|:--:|:--:|----|----|----|:--:| -| PPLCNet_x0_25 |0.5186 | 0.7565 | 1.61785 | 18.25 | 1.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar) | -| PPLCNet_x0_35 |0.5809 | 0.8083 | 2.11344 | 29.46 | 1.65 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar) | -| PPLCNet_x0_5 |0.6314 | 0.8466 | 2.72974 | 47.28 | 1.89 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar) | -| PPLCNet_x0_75 |0.6818 | 0.8830 | 4.51216 | 98.82 | 2.37 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar) | -| PPLCNet_x1_0 |0.7132 | 0.9003 | 6.49276 | 160.81 | 2.96 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar) | -| PPLCNet_x1_5 |0.7371 | 0.9153 | 12.2601 | 341.86 | 4.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar) | -| PPLCNet_x2_0 |0.7518 | 0.9227 | 20.1667 | 590 | 6.54 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar) | -| PPLCNet_x2_5 |0.7660 | 0.9300 | 29.595 | 906 | 9.04 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar) | +| PPLCNet_x0_25 |0.5186 | 0.7565 | 1.74 | 18.25 | 1.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar) | +| PPLCNet_x0_35 |0.5809 | 0.8083 | 1.92 | 29.46 | 1.65 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar) | +| PPLCNet_x0_5 |0.6314 | 0.8466 | 2.05 | 47.28 | 1.89 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar) | +| PPLCNet_x0_75 |0.6818 | 0.8830 | 2.29 | 98.82 | 2.37 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar) | +| PPLCNet_x1_0 |0.7132 | 0.9003 | 2.46 | 160.81 | 2.96 | 
[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar) | +| PPLCNet_x1_5 |0.7371 | 0.9153 | 3.19 | 341.86 | 4.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar) | +| PPLCNet_x2_0 |0.7518 | 0.9227 | 4.27 | 590 | 6.54 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar) | +| PPLCNet_x2_5 |0.7660 | 0.9300 | 5.39 | 906 | 9.04 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar) | + +| 模型 | Top-1 Acc | Top-5 Acc | time(ms)**
bs=1 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | +|:--:|:--:|:--:|:--:|----|----|----|:--:| +| PPLCNetV2_base | 77.04 | 93.27 | 4.32 | 604 | 6.6 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar) | + + +*: 基于 Intel-Xeon-Gold-6148 硬件平台与 PaddlePaddle 推理平台。 + +**: 基于 Intel-Xeon-Gold-6271C 硬件平台与 OpenVINO 2021.4.2 推理平台。 + + + +## PP-HGNet 系列 + +PP-HGNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[PP-HGNet 系列模型文档](../models/PP-HGNet.md)。 + +| 模型 | Top-1 Acc | Top-5 Acc | time(ms)
bs=1 | time(ms)
bs=4 | time(ms)
bs=8 | FLOPs(G) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| PPHGNet_tiny | 0.7983 | 0.9504 | 1.77 | - | - | 4.54 | 14.75 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar) | +| PPHGNet_tiny_ssld | 0.8195 | 0.9612 | 1.77 | - | - | 4.54 | 14.75 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_ssld_infer.tar) | +| PPHGNet_small | 0.8151 | 0.9582 | 2.52 | - | - | 8.53 | 24.38 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar) | +| PPHGNet_small_ssld | 0.8382 | 0.9681 | 2.52 | - | - | 8.53 | 24.38 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_ssld_infer.tar) | +| PPHGNet_base_ssld | 0.8500 | 0.9735 | 5.97 | - | - | 25.14 | 71.62 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_base_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_base_ssld_infer.tar) | - + -## 4. ResNet 系列 [[1](#ref1)] +## ResNet 系列 [[1](#ref1)] ResNet 及其 Vd 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[ResNet 及其 Vd 系列模型文档](../models/ResNet_and_vd.md)。 @@ -145,9 +171,9 @@ ResNet 及其 Vd 系列模型的精度、速度指标如下表所示,更多关 | ResNet50_vd_
ssld | 0.8300 | 0.9640 | 2.60 | 4.86 | 7.63 | 4.35 | 25.63 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_ssld_infer.tar) | | ResNet101_vd_
ssld | 0.8373 | 0.9669 | 4.43 | 8.25 | 12.60 | 8.08 | 44.67 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet101_vd_ssld_infer.tar) | - + -## 5. 移动端系列 [[3](#ref3)][[4](#ref4)][[5](#ref5)][[6](#ref6)][[23](#ref23)] +## 移动端系列 [[3](#ref3)][[4](#ref4)][[5](#ref5)][[6](#ref6)][[23](#ref23)] 移动端系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[移动端系列模型文档](../models/Mobile.md)。 @@ -194,9 +220,9 @@ ResNet 及其 Vd 系列模型的精度、速度指标如下表所示,更多关 | ESNet_x0_75 | 0.7224 | 0.9045 |9.59|6.28|4.52| 123.74 | 3.87 | 15 |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams) |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ESNet_x0_75_infer.tar) | | ESNet_x1_0 | 0.7392 | 0.9140 |13.67|8.71|5.97| 197.33 | 4.64 | 18 |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams) |[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ESNet_x1_0_infer.tar) | - + -## 6. SEResNeXt 与 Res2Net 系列 [[7](#ref7)][[8](#ref8)][[9](#ref9)] +## SEResNeXt 与 Res2Net 系列 [[7](#ref7)][[8](#ref8)][[9](#ref9)] SEResNeXt 与 Res2Net 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[SEResNeXt 与 Res2Net 系列模型文档](../models/SEResNext_and_Res2Net.md)。 @@ -229,9 +255,9 @@ SEResNeXt 与 Res2Net 系列模型的精度、速度指标如下表所示,更 | SE_ResNeXt101_
32x4d | 0.7939 | 0.9443 | 13.31 | 21.85 | 28.77 | 8.03 | 49.09 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_ResNeXt101_32x4d_infer.tar) | | SENet154_vd | 0.8140 | 0.9548 | 34.83 | 51.22 | 69.74 | 24.45 | 122.03 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SENet154_vd_infer.tar) | - + -## 7. DPN 与 DenseNet 系列 [[14](#ref14)][[15](#ref15)] +## DPN 与 DenseNet 系列 [[14](#ref14)][[15](#ref15)] DPN 与 DenseNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[DPN 与 DenseNet 系列模型文档](../models/DPN_DenseNet.md)。 @@ -249,9 +275,9 @@ DPN 与 DenseNet 系列模型的精度、速度指标如下表所示,更多关 | DPN107 | 0.8089 | 0.9532 | 19.46 | 35.62 | 50.22 | 18.38 | 87.13 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DPN107_infer.tar) | | DPN131 | 0.8070 | 0.9514 | 19.64 | 34.60 | 47.42 | 16.09 | 79.48 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DPN131_infer.tar) | - + -## 8. HRNet 系列 [[13](#ref13)] +## HRNet 系列 [[13](#ref13)] HRNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[HRNet 系列模型文档](../models/HRNet.md)。 @@ -268,9 +294,9 @@ HRNet 系列模型的精度、速度指标如下表所示,更多关于该系 | HRNet_W64_C | 0.7930 | 0.9461 | 13.82 | 21.15 | 35.51 | 28.97 | 128.18 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HRNet_W64_C_infer.tar) | | SE_HRNet_W64_C_ssld | 0.8475 | 0.9726 | 17.11 | 26.87 | 43.24 | 29.00 | 129.12 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/SE_HRNet_W64_C_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SE_HRNet_W64_C_ssld_infer.tar) | - + -## 9. Inception 系列 [[10](#ref10)][[11](#ref11)][[12](#ref12)][[26](#ref26)] +## Inception 系列 [[10](#ref10)][[11](#ref11)][[12](#ref12)][[26](#ref26)] Inception 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[Inception 系列模型文档](../models/Inception.md)。 @@ -285,9 +311,9 @@ Inception 系列模型的精度、速度指标如下表所示,更多关于该 | InceptionV3 | 0.7914 | 0.9459 | 4.78 | 8.53 | 12.28 | 5.73 | 23.87 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/InceptionV3_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/InceptionV3_infer.tar) | | InceptionV4 | 0.8077 | 0.9526 | 8.93 | 15.17 | 21.56 | 12.29 | 42.74 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/InceptionV4_infer.tar) | - + -## 10. 
EfficientNet 与 ResNeXt101_wsl 系列 [[16](#ref16)][[17](#ref17)] +## EfficientNet 与 ResNeXt101_wsl 系列 [[16](#ref16)][[17](#ref17)] EfficientNet 与 ResNeXt101_wsl 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[EfficientNet 与 ResNeXt101_wsl 系列模型文档](../models/EfficientNet_and_ResNeXt101_wsl.md)。 @@ -308,9 +334,9 @@ EfficientNet 与 ResNeXt101_wsl 系列模型的精度、速度指标如下表所 | EfficientNetB7 | 0.8430 | 0.9689 | 25.91 | 71.23 | 128.20 | 38.45 | 66.66 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/EfficientNetB7_infer.tar) | | EfficientNetB0_
small | 0.7580 | 0.9258 | 1.24 | 2.59 | 3.92 | 0.40 | 4.69 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_small_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/EfficientNetB0_small_infer.tar) | - + -## 11. ResNeSt 与 RegNet 系列 [[24](#ref24)][[25](#ref25)] +## ResNeSt 与 RegNet 系列 [[24](#ref24)][[25](#ref25)] ResNeSt 与 RegNet 系列模型的精度、速度指标如下表所示,更多关于该系列的模型介绍可以参考:[ResNeSt 与 RegNet 系列模型文档](../models/ResNeSt_RegNet.md)。 @@ -320,9 +346,9 @@ ResNeSt 与 RegNet 系列模型的精度、速度指标如下表所示,更多 | ResNeSt50 | 0.8083 | 0.9542 | 7.36 | 10.23 | 13.84 | 5.40 | 27.54 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNeSt50_infer.tar) | | RegNetX_4GF | 0.785 | 0.9416 | 6.46 | 8.48 | 11.45 | 4.00 | 22.23 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RegNetX_4GF_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RegNetX_4GF_infer.tar) | - + -## 12. ViT_and_DeiT 系列 [[31](#ref31)][[32](#ref32)] +## ViT_and_DeiT 系列 [[31](#ref31)][[32](#ref32)] ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模型的精度、速度指标如下表所示. 更多关于该系列模型的介绍可以参考: [ViT_and_DeiT 系列模型文档](../models/ViT_and_DeiT.md)。 @@ -347,9 +373,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | DeiT_base_
distilled_patch16_224 | 0.831 | 0.964 | 6.17 | 14.94 | 28.58 | 16.93 | 87.18 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_224_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DeiT_base_distilled_patch16_224_infer.tar) | | DeiT_base_
distilled_patch16_384 | 0.851 | 0.973 | 14.12 | 48.76 | 97.09 | 49.43 | 87.18 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_distilled_patch16_384_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DeiT_base_distilled_patch16_384_infer.tar) | - + -## 13. RepVGG 系列 [[36](#ref36)] +## RepVGG 系列 [[36](#ref36)] 关于 RepVGG 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[RepVGG 系列模型文档](../models/RepVGG.md)。 @@ -366,9 +392,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | RepVGG_B2g4 | 0.7881 | 0.9448 | | | | 11.34 | 55.78 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B2g4_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RepVGG_B2g4_infer.tar) | | RepVGG_B3g4 | 0.7965 | 0.9485 | | | | 16.07 | 75.63 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RepVGG_B3g4_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RepVGG_B3g4_infer.tar) | - + -## 14. MixNet 系列 [[29](#ref29)] +## MixNet 系列 [[29](#ref29)] 关于 MixNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[MixNet 系列模型文档](../models/MixNet.md)。 @@ -378,9 +404,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | MixNet_M | 0.7767 | 0.9364 | 2.84 | 4.60 | 6.62 | 357.119 | 5.065 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MixNet_M_infer.tar) | | MixNet_L | 0.7860 | 0.9437 | 3.16 | 5.55 | 8.03 | 579.017 | 7.384 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MixNet_L_infer.tar) | - + -## 15. ReXNet 系列 [[30](#ref30)] +## ReXNet 系列 [[30](#ref30)] 关于 ReXNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[ReXNet 系列模型文档](../models/ReXNet.md)。 @@ -392,9 +418,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | ReXNet_2_0 | 0.8122 | 0.9536 | 4.30 | 6.54 | 9.19 | 1.56 | 16.45 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ReXNet_2_0_infer.tar) | | ReXNet_3_0 | 0.8209 | 0.9612 | 5.74 | 9.49 | 13.62 | 3.44 | 34.83 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ReXNet_3_0_infer.tar) | - + -## 16. SwinTransformer 系列 [[27](#ref27)] +## SwinTransformer 系列 [[27](#ref27)] 关于 SwinTransformer 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[SwinTransformer 系列模型文档](../models/SwinTransformer.md)。 @@ -411,9 +437,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 [1]:基于 ImageNet22k 数据集预训练,然后在 ImageNet1k 数据集迁移学习得到。 - + -## 17. LeViT 系列 [[33](#ref33)] +## LeViT 系列 [[33](#ref33)] 关于 LeViT 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[LeViT 系列模型文档](../models/LeViT.md)。 @@ -427,9 +453,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 **注**:与 Reference 的精度差异源于数据预处理不同及未使用蒸馏的 head 作为输出。 - + -## 18. Twins 系列 [[34](#ref34)] +## Twins 系列 [[34](#ref34)] 关于 Twins 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[Twins 系列模型文档](../models/Twins.md)。 @@ -444,9 +470,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 **注**:与 Reference 的精度差异源于数据预处理不同。 - + -## 19. 
HarDNet 系列 [[37](#ref37)] +## HarDNet 系列 [[37](#ref37)] 关于 HarDNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[HarDNet 系列模型文档](../models/HarDNet.md)。 @@ -457,9 +483,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | HarDNet68| 0.7546 | 0.9265 | 3.58 | 8.53 | 11.58 | 4.26 | 17.58 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HarDNet68_infer.tar) | | HarDNet85 | 0.7744 | 0.9355 | 6.24 | 14.85 | 20.57 | 9.09 | 36.69 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/HarDNet85_infer.tar) | - + -## 20. DLA 系列 [[38](#ref38)] +## DLA 系列 [[38](#ref38)] 关于 DLA 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[DLA 系列模型文档](../models/DLA.md)。 @@ -475,9 +501,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | DLA60x_c | 0.6645 | 0.8754 | 1.79 | 3.68 | 5.19 | 0.59 | 1.33 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DLA60x_c_infer.tar) | | DLA60x | 0.7753 | 0.9378 | 5.98 | 9.24 | 12.52 | 3.54 | 17.41 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/DLA60x_infer.tar) | - + -## 21. RedNet 系列 [[39](#ref39)] +## RedNet 系列 [[39](#ref39)] 关于 RedNet 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[RedNet 系列模型文档](../models/RedNet.md)。 @@ -489,9 +515,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | RedNet101 | 0.7894 | 0.9436 | 13.07 | 44.12 | 83.28 | 4.59 | 25.76 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RedNet101_infer.tar) | | RedNet152 | 0.7917 | 0.9440 | 18.66 | 63.27 | 119.48 | 6.57 | 34.14 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/RedNet152_infer.tar) | - + -## 22. TNT 系列 [[35](#ref35)] +## TNT 系列 [[35](#ref35)] 关于 TNT 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[TNT 系列模型文档](../models/TNT.md)。 @@ -501,9 +527,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 **注**:TNT 模型的数据预处理部分 `NormalizeImage` 中的 `mean` 与 `std` 均为 0.5。 - + -## 23. CSWinTransformer 系列 [[40](#ref40)] +## CSWinTransformer 系列 [[40](#ref40)] 关于 CSWinTransformer 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[CSWinTransformer 系列模型文档](../models/CSWinTransformer.md)。 @@ -517,9 +543,9 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | CSWinTransformer_large_384 | 0.8748 | 0.9833 | - | - | - | 94.7 | 173.3 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_384_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/CSWinTransformer_large_384_infer.tar) | - + -## 24. 
PVTV2 系列 [[41](#ref41)] +## PVTV2 系列 [[41](#ref41)] 关于 PVTV2 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[PVTV2 系列模型文档](../models/PVTV2.md)。 @@ -534,21 +560,21 @@ ViT(Vision Transformer) 与 DeiT(Data-efficient Image Transformers)系列模 | PVT_V2_B5 | 0.837 | 0.966 | - | - | - | 11.4 | 82.0 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PVT_V2_B5_infer.tar) | - + -## 25. MobileViT 系列 [[42](#ref42)] +## MobileViT 系列 [[42](#ref42)] 关于 MobileViT 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[MobileViT 系列模型文档](../models/MobileViT.md)。 | 模型 | Top-1 Acc | Top-5 Acc | time(ms)
bs=1 | time(ms)
bs=4 | time(ms)
bs=8 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | | ---------- | --------- | --------- | ---------------- | ---------------- | -------- | --------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| MobileViT_XXS | 0.6867 | 0.8878 | - | - | - | 1849.35 | 5.59 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XXS_infer.tar) | +| MobileViT_XXS | 0.6867 | 0.8878 | - | - | - | 337.24 | 1.28 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XXS_infer.tar) | | MobileViT_XS | 0.7454 | 0.9227 | - | - | - | 930.75 | 2.33 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XS_infer.tar) | -| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 337.24 | 1.28 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) | +| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 1849.35 | 5.59 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) | - + -## 26. 其他模型 +## 其他模型 关于 AlexNet [[18](#ref18)]、SqueezeNet 系列 [[19](#ref19)]、VGG 系列 [[20](#ref20)]、DarkNet53 [[21](#ref21)] 等模型的精度、速度指标如下表所示,更多介绍可以参考:[其他模型文档](../models/Others.md)。 diff --git a/docs/zh_CN/algorithm_introduction/action_rec_by_classification.md b/docs/zh_CN/algorithm_introduction/action_rec_by_classification.md new file mode 100644 index 0000000000000000000000000000000000000000..bf8272be99128eef40a4bdbfbc6a0273b2f51c8e --- /dev/null +++ b/docs/zh_CN/algorithm_introduction/action_rec_by_classification.md @@ -0,0 +1,201 @@ +# 基于图像分类的打电话行为识别模型 + +------ + +## 目录 +- [1. 模型和应用场景介绍](#1) +- [2. 模型训练、评估和预测](#2) + - [2.1 PaddleClas 环境安装](#2.1) + - [2.2 数据准备](#2.2) + - [2.2.1 数据集下载](#2.2.1) + - [2.2.2 训练及测试图像处理](#2.2.2) + - [2.2.3 标注文件准备](#2.2.3) + - [2.3 模型训练](#2.3) + - [2.4 模型评估](#2.4) + - [2.5 模型预测](#2.5) +- [3. 模型推理部署](#3) + - [3.1 模型导出](#3.1) + - [3.2 执行模型预测](#3.2) +- [4. 在PP-Human中使用该模型](#4) + +
+ +
数据来源及版权归属:天覆科技。感谢其提供并开源实际场景数据,数据仅限学术研究使用。
+
+ + + +## 1. 模型和应用场景介绍 +行为识别在智慧社区,安防监控等方向具有广泛应用。根据行为的不同,一些行为可以通过图像直接进行行为判断(例如打电话)。这里我们提供了基于图像分类的打电话行为识别模型,对人物图像进行是否打电话的二分类识别。 + +| 任务 | 算法 | 精度 | 预测速度(ms) | 模型权重 | +| ---- | ---- | ---- | ---- | ------ | +| 打电话行为识别 | PP-HGNet-tiny | 准确率: 86.85 | 单人 2.94ms | [下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_tiny_calling_halfbody.pdparams) | + +注: +1. 该模型使用[UAV-Human](https://github.com/SUTDCV/UAV-Human)的打电话行为部分进行训练和测试。 +2. 预测速度为NVIDIA T4 机器上使用TensorRT FP16时的速度, 速度包含数据预处理、模型预测、后处理全流程。 + +该模型为实时行人分析工具[PP-Human](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/deploy/pipeline)中行为识别功能的一部分,欢迎体验PP-Human的完整功能。 + + +## 2. 模型训练、评估和预测 + + + +### 2.1 PaddleClas 环境安装 +请根据[环境准备](../installation/install_paddleclas.md)完成PaddleClas的环境依赖准备。 + + + +### 2.2 数据准备 + + + +#### 2.2.1 数据集下载 +打电话的行为识别是基于公开数据集[UAV-Human](https://github.com/SUTDCV/UAV-Human)进行训练的。请通过该链接填写相关数据集申请材料后获取下载链接。 + +在`UAVHuman/ActionRecognition/RGBVideos`路径下包含了该数据集中RGB视频数据集,每个视频的文件名即为其标注信息。 + + + +#### 2.2.2 训练及测试图像处理 +根据视频文件名,其中与行为识别相关的为`A`相关的字段(即action),我们可以找到期望识别的动作类型数据。 +- 正样本视频:以打电话为例,我们只需找到包含`A024`的文件。 +- 负样本视频:除目标动作以外所有的视频。 + +鉴于视频数据转化为图像会有较多冗余,对于正样本视频,我们间隔8帧进行采样,并使用行人检测模型处理为半身图像(取检测框的上半部分,即`img = img[:H/2, :, :]`)。正样本视频中的采样得到的图像即视为正样本,负样本视频中采样得到的图像即为负样本。 + +**注意**: 正样本视频中并不完全符合打电话这一动作,在视频开头结尾部分会出现部分冗余动作,需要移除。 + + + +#### 2.2.3 标注文件准备 +根据[PaddleClas数据集格式说明](../data_preparation/classification_dataset.md),标注文件样例如下,其中`0`,`1`分别是图片对应所属的类别: +``` + # 每一行采用"空格"分隔图像路径与标注 + train/000001.jpg 0 + train/000002.jpg 0 + train/000003.jpg 1 + ... +``` + +此外,标签文件`phone_label_list.txt`,帮助将分类序号映射到具体的类型名称: +``` +0 make_a_phone_call # 类型0 +1 normal # 类型1 +``` + +完成上述内容后,放置于`dataset`目录下,文件结构如下: +``` +data/ +├── images # 放置所有图片 +├── phone_label_list.txt # 标签文件 +├── phone_train_list.txt # 训练列表,包含图片及其对应类型 +└── phone_val_list.txt # 测试列表,包含图片及其对应类型 +``` + + +### 2.3 模型训练 + +通过如下命令启动训练: +```bash +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml \ + -o Arch.pretrained=True +``` +其中 `Arch.pretrained` 为 `True`表示使用预训练权重帮助训练。 + + + +### 2.4 模型评估 +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ./ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml \ + -o Global.pretrained_model=output/PPHGNet_tiny/best_model +``` + +其中 `-o Global.pretrained_model="output/PPHGNet_tiny/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + +### 2.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```bash +python3 tools/infer.py \ + -c ./ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml \ + -o Global.pretrained_model=output/PPHGNet_tiny/best_model + -o Infer.infer_imgs={your test image} +``` + + + +## 3. 
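
+作为对 2.2.2 节处理流程的补充,下面给出"间隔 8 帧采样 + 保留上半身区域"的一个示意脚本。脚本假设已安装 OpenCV(opencv-python),函数名与路径均为示例;实际流程中应先用行人检测模型得到检测框,再取检测框的上半部分,此处为简化直接对整幅图像做 `img[:H//2, :, :]` 裁剪:
+
+```python
+import os
+import cv2  # 假设已安装 opencv-python
+
+def sample_halfbody_frames(video_path, out_dir, interval=8):
+    """每隔 interval 帧采样一帧,并保留图像上半部分(示意)"""
+    os.makedirs(out_dir, exist_ok=True)
+    cap = cv2.VideoCapture(video_path)
+    idx = saved = 0
+    while True:
+        ok, frame = cap.read()
+        if not ok:
+            break
+        if idx % interval == 0:
+            h = frame.shape[0]
+            half = frame[: h // 2, :, :]  # 对应文中的 img[:H/2, :, :]
+            cv2.imwrite(os.path.join(out_dir, f"{saved:06d}.jpg"), half)
+            saved += 1
+        idx += 1
+    cap.release()
+```
+
+## 3. 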
模型推理部署 +Paddle Inference 是飞桨的原生推理库,作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用 MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于 Paddle Inference 推理引擎的介绍,可以参考 [Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + + + +### 3.1 模型导出 +```bash +python3 tools/export_model.py \ + -c ./ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml \ + -o Global.pretrained_model=output/PPHGNet_tiny/best_model \ + -o Global.save_inference_dir=deploy/models//PPHGNet_tiny_calling_halfbody/ +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPHGNet_tiny_calling_halfbody` 文件夹,文件结构如下: + +``` +├── PPHGNet_tiny_calling_halfbody +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 3.2 执行模型预测 +在`deploy`下,执行下列命令: + +```bash +# Current path is {root of PaddleClas}/deploy + +python3 python/predict_cls.py -c configs/inference_cls_based_action.yaml +``` + + + +## 4. 在PP-Human中使用该模型 +[PP-Human](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/deploy/pipeline)是基于飞桨深度学习框架的业界首个开源产业级实时行人分析工具,具有功能丰富,应用广泛和部署高效三大优势。该模型可以应用于PP-Human中,实现实时视频的打电话行为识别功能。 + +由于当前的PP-Human功能集成在[PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection)中,需要按以下步骤实现该模型在PP-Human中的调用适配。 + +1. 完成模型导出 +2. 重命名模型 +```bash +cd deploy/models/PPHGNet_tiny_calling_halfbody + +mv inference.pdiparams model.pdiparams +mv inference.pdiparams.info model.pdiparams.info +mv inference.pdmodel model.pdmodel +``` +3. 下载[预测配置文件](https://bj.bcebos.com/v1/paddledet/models/pipeline/infer_configs/PPHGNet_tiny_calling_halfbody/infer_cfg.yml) + +``` bash +wget https://bj.bcebos.com/v1/paddledet/models/pipeline/infer_configs/PPHGNet_tiny_calling_halfbody/infer_cfg.yml +``` +完成后文件结构如下,即可在PP-Human中使用: +``` +PPHGNet_tiny_calling_halfbody +├── infer_cfg.yml +├── model.pdiparams +├── model.pdiparams.info +└── model.pdmodel +``` + +详细请参考[基于图像分类的行为识别——打电话识别](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/deploy/pipeline/docs/tutorials/action.md#%E5%9F%BA%E4%BA%8E%E5%9B%BE%E5%83%8F%E5%88%86%E7%B1%BB%E7%9A%84%E8%A1%8C%E4%B8%BA%E8%AF%86%E5%88%AB%E6%89%93%E7%94%B5%E8%AF%9D%E8%AF%86%E5%88%AB)。 diff --git a/docs/zh_CN/algorithm_introduction/knowledge_distillation.md b/docs/zh_CN/algorithm_introduction/knowledge_distillation.md index 58092195956119416277e62ec225318373a2bfa3..afedce7f07117c857717575ca063bab8a5decc66 100644 --- a/docs/zh_CN/algorithm_introduction/knowledge_distillation.md +++ b/docs/zh_CN/algorithm_introduction/knowledge_distillation.md @@ -42,7 +42,7 @@ PaddleClas 中提出了一种简单使用的 SSLD 知识蒸馏算法 [6],在训练的时候去除了对 gt label 的依赖,结合大量无标注数据,最终蒸馏训练得到的预训练模型在 15 个模型上的精度提升平均高达 3%。 -上述标准的蒸馏方法是通过一个大模型作为教师模型来指导学生模型提升效果,而后来又发展出 DML(Deep Mutual Learning)互学习蒸馏方法 [7],即通过两个结构相同的模型互相学习。具体的。相比于 KD 等依赖于大的教师模型的知识蒸馏算法,DML 脱离了对大的教师模型的依赖,蒸馏训练的流程更加简单,模型产出效率也要更高一些。 +上述标准的蒸馏方法是通过一个大模型作为教师模型来指导学生模型提升效果,而后来又发展出 DML(Deep Mutual Learning)互学习蒸馏方法 [7],即通过两个结构相同的模型互相学习。具体的。相比于 KD 等依赖于大的教师模型的知识蒸馏算法,DML 脱离了对大的教师模型的依赖,蒸馏训练的流程更加简单,模型产出效率也要更高一些。 ### 3.2 Feature based distillation diff --git a/docs/zh_CN/algorithm_introduction/reid.md b/docs/zh_CN/algorithm_introduction/reid.md new file mode 100644 index 0000000000000000000000000000000000000000..4c0cad846caa81b838ac8c0ec664e9108e3dc647 --- /dev/null +++ b/docs/zh_CN/algorithm_introduction/reid.md @@ -0,0 +1,363 @@ +简体中文 | [English](../../en/algorithm_introduction/reid.md) + +# ReID行人重识别 + +## 目录 + +- [1. 算法/应用场景简介](#1-算法应用场景简介) +- [2. 常用数据集与指标](#2-常用数据集与指标) + - [2.1 常用数据集](#21-常用数据集) + - [2.2 常用指标](#22-常用指标) +- [3. 
ReID算法](#3-reid算法) + - [3.1 ReID strong-baseline](#31-reid-strong-baseline) + - [3.1.1 原理介绍](#311-原理介绍) + - [3.1.2 精度指标](#312-精度指标) + - [3.1.3 数据准备](#313-数据准备) + - [3.1.4 模型训练](#314-模型训练) +- [4. 模型评估与推理部署](#4-模型评估与推理部署) + - [4.1 模型评估](#41-模型评估) + - [4.2 模型推理](#42-模型推理) + - [4.2.1 推理模型准备](#421-推理模型准备) + - [4.2.2 基于 Python 预测引擎推理](#422-基于-python-预测引擎推理) + - [4.2.3 基于 C++ 预测引擎推理](#423-基于-c-预测引擎推理) + - [4.3 服务化部署](#43-服务化部署) + - [4.4 端侧部署](#44-端侧部署) + - [4.5 Paddle2ONNX 模型转换与预测](#45-paddle2onnx-模型转换与预测) +- [5. 总结](#5-总结) + - [5.1 方法总结与对比](#51-方法总结与对比) + - [5.2 使用建议/FAQ](#52-使用建议faq) +- [6. 参考资料](#6-参考资料) + +### 1. 算法/应用场景简介 + +行人重识别(Person re-identification, Re-ID)也称行人再识别,作为跨镜头的行人检索问题被广泛研究。给定某一个摄像机拍摄下的行人图片,目标是判断该行人是否在不同相机或者不同时间段拍摄的画面中出现过。给定的行人数据可以是一张图片,也可以是视频帧,甚至可以是一段文字描述。近年来,公共安全领域对该技术的应用需求日益增加,行人重识别在智能监控技术中的影响也越来越大。 + +目前行人重识别仍然是一个具有挑战性的任务,尤其是不同的视点、分辨率、光照变化、遮挡情况、多模态,以及复杂的相机环境与背景、标注数据噪声等问题,给识别算法带来了很大的不确定性。另外,在实际落地时,拍摄相机可能会发生变化,大规模的检索库、数据集的分布偏移、未知的场景、模型增量更新以及检索人物的服装变化,这同样增加了不少困难。 + +早期的行人重识别工作主要关注手工设计特征提取算子,包括加入人体姿态特征,或者距离度量函数的学习。随着深度学习技术的发展,行人重识也取得了巨大的进步。总的来说,行人重识别整个过程包括5个步骤:1)数据采集,2)行人位置框标注,3)行人类别标注,4)模型训练,5)行人检索(模型测试)。 + + + +### 2. 常用数据集与指标 + +#### 2.1 常用数据集 + +| Dataset | #ID | #Image | #cam | +| :---------- | :----: | :----: | :---: | +| VIPeR | 632 | 1264 | 2 | +| iLIDS | 119 | 476 | 2 | +| GRID | 250 | 1275 | 8 | +| PRID2011 | 200 | 1134 | 2 | +| CUHK01 | 971 | 3884 | 2 | +| CUHK02 | 1816 | 7264 | 10 | +| CUHK03 | 1467 | 13164 | 2 | +| Market-1501 | 1501 | 32668 | 6 | +| DukeMTMC | 1404 | 36411 | 8 | +| Airport | 39902 | 39902 | 6 | +| MSMT17 | 126441 | 126441 | 15 | + +#### 2.2 常用指标 + +1. CMC曲线 + + 公式如下: + $$ CMC(K)=\frac{1}{N} \sum_{i=1}^{N} \begin{cases} 1, & \text{if $label_i \in Top{K}(result_i)$} \\\\ 0, & \text{if $label_i \notin Top{K}(result_i)$} \end{cases} $$ + + 其中$N$为查询样本的数量,$result_i$为每个查询样本检索结果的标签集合,根据公式可以将CMC曲线理解为Top1-Acc、Top2-Acc、...、TopK-Acc构成的数组,显然这是一个单调不降的曲线。其中常见的Rank-1、Top1-Acc指标即是指CMC(1) + +2. mAP指标 + + 假设使用一个查询样本,返回了一组查询结果,那么按照以下公式,逐个考虑前K个查询结果,对于每个K,算出其精确率Precision和召回率Recall。 + $$\begin{align} precision&=\frac{|\\{同类别图片\\} \cap \\{前K个查询结果\\}|}{|\\{前K个查询结果\\}|} \\\\ recall&=\frac{|\\{同类别图片\\} \cap \\{前K个查询结果\\}|}{|\\{同类别图片\\}|} \end{align}$$ + 将得到的多组(Precision, Recall)化成曲线图,该曲线与坐标轴围成的面积,称为Average Precision(AP), + 对于每个样本,计算其AP值,然后取平均,就得到了mAP指标。 +### 3. ReID算法 + +#### 3.1 ReID strong-baseline + +论文出处:[Bag of Tricks and A Strong Baseline for Deep Person Re-identification](https://openaccess.thecvf.com/content_CVPRW_2019/papers/TRMTMCT/Luo_Bag_of_Tricks_and_a_Strong_Baseline_for_Deep_Person_CVPRW_2019_paper.pdf) + + + +##### 3.1.1 原理介绍 + +作者以普遍使用的基于 ResNet50 的行人重识别模型为基础,探索并总结了以下几种有效且适用性较强的优化方法,大幅度提高了在多个行人重识别数据集上的指标。 + +1. Warmup:在训练一开始让学习率从一个较小值逐渐升高后再开始下降,有利于梯度下降优化时的稳定性,从而找到更优的参数模型。 +2. Random erasing augmentation:随机区域擦除,通过数据增强来提升模型的泛化能力。 +3. Label smoothing:标签平滑,提升模型的泛化能力。 +4. Last stride=1:设定特征提取模块的最后一个stage的下采样为1,增大输出特征图的分辨率来保留更多细节,提升模型的分类能力。 +5. BNNeck:特征向量输入分类头之前先经过BNNeck,让特征在超球体表面服从正态分布,减少了同时优化IDLoss和TripLetLoss的难度。 +6. Center loss:给每个类别一个可学习的聚类中心,训练时让类内特征靠近聚类中心,减少类内差异,增大类间差异。 +7. 
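上述第 6 点 center loss 的核心思想(拉近特征与其所属类别中心的距离),可以用如下极简代码示意。该代码并非 PaddleClas 中的实际实现,类名与接口均为示例,仅用于说明思想:
+
+   ```python
+   import paddle
+
+   class SimpleCenterLoss(paddle.nn.Layer):
+       """center loss 的极简示意:惩罚特征到其类别中心的平方距离"""
+       def __init__(self, num_classes, feat_dim):
+           super().__init__()
+           # 每个类别一个可学习的聚类中心
+           self.centers = self.create_parameter(shape=[num_classes, feat_dim])
+
+       def forward(self, feats, labels):
+           # 取出 batch 内每个样本所属类别的中心,拉近特征与中心的距离
+           centers_batch = paddle.gather(self.centers, labels)
+           return ((feats - centers_batch) ** 2).sum(axis=1).mean()
+   ```
+
+7. 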
Reranking:在检索时考虑查询图像的近邻候选对象,根据候选对象的近邻图像的是否也含有查询图像的情况来优化距离矩阵,最终提升检索精度。 + +##### 3.1.2 精度指标 + +以下表格总结了复现的ReID strong-baseline的3种配置在 Market1501 数据集上的精度指标, + +| 配置文件 | recall@1(\%) | mAP(\%) | 参考recall@1(\%) | 参考mAP(\%) | 预训练模型下载地址 | inference模型下载地址 | +| -------------------------------- | ------------ | ------- | ---------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | +| baseline.yaml | 88.45 | 74.37 | 87.7 | 74.0 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/baseline_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/baseline_infer.tar) | +| softmax_triplet.yaml | 94.29 | 85.57 | 94.1 | 85.7 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/softmax_triplet_infer.tar) | +| softmax_triplet_with_center.yaml | 94.50 | 85.82 | 94.5 | 85.9 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_with_center_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/softmax_triplet_with_center_infer.tar) | + +注:上述参考指标由使用作者开源的代码在我们的设备上训练多次得到,由于系统环境、torch版本、CUDA版本不同等原因,与作者提供的指标可能存在略微差异。 + +接下来主要以`softmax_triplet_with_center.yaml`配置和训练好的模型文件为例,展示在 Market1501 数据集上进行训练、测试、推理的过程。 + +##### 3.1.3 数据准备 + +下载 [Market-1501-v15.09.15.zip](https://pan.baidu.com/s/1ntIi2Op?_at_=1654142245770) 数据集,解压到`PaddleClas/dataset/`下,并组织成以下文件结构: + + ```shell + PaddleClas/dataset/market1501 + └── Market-1501-v15.09.15/ + ├── bounding_box_test/ # gallery集图片 + ├── bounding_box_train/ # 训练集图片 + ├── gt_bbox/ + ├── gt_query/ + ├── query/ # query集图片 + ├── generate_anno.py + ├── bounding_box_test.txt # gallery集路径 + ├── bounding_box_train.txt # 训练集路径 + ├── query.txt # query集路径 + └── readme.txt + ``` + +##### 3.1.4 模型训练 + +1. 执行以下命令开始训练 + + 单卡训练: + ```shell + python3.7 tools/train.py -c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml + ``` + + 多卡训练: + + 多卡训练需修改训练配置的采样器字段以适配分布式训练,如下所示: + ```yaml + sampler: + name: PKSampler + batch_size: 64 + sample_per_id: 4 + drop_last: False + sample_method: id_avg_prob + shuffle: True + ``` + 然后执行以下命令: + ```shell + export CUDA_VISIBLE_DEVICES=0,1,2,3 + python3.7 -m paddle.distributed.launch --gpus="0,1,2,3" tools/train.py \ + -c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml + ``` + 注:单卡训练大约需要1个小时。 + +2. 查看训练日志和保存的模型参数文件 + + 训练过程中会在屏幕上实时打印loss等指标信息,同时会保存日志文件`train.log`、模型参数文件`*.pdparams`、优化器参数文件`*.pdopt`等内容到`Global.output_dir`指定的文件夹下,默认在`PaddleClas/output/RecModel/`文件夹下。 + +### 4. 
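
+在进入下一节的评估环节之前,先用一个极简的数值例子示意 2.2 节中 CMC 与 mAP 指标的计算方式。此处采用常见的离散化计算方式,并假设所有同类样本都出现在返回结果中,数据为手工构造:
+
+```python
+import numpy as np
+
+# 假设有 2 个 query,每个返回按相似度排序的前 3 个结果的正误(True 表示同一 ID)
+results = [
+    [True,  False, True ],   # query 1
+    [False, True,  False],   # query 2
+]
+
+# CMC(K):前 K 个结果中只要命中一次即记 1,再对所有 query 取平均
+for k in (1, 2, 3):
+    print(f"CMC({k}) =", np.mean([any(r[:k]) for r in results]))  # 0.5, 1.0, 1.0
+
+# AP:在每个命中位置计算精确率再取平均;mAP 为所有 query 的 AP 均值
+def average_precision(r):
+    hits = [i + 1 for i, v in enumerate(r) if v]
+    return float(np.mean([sum(r[:p]) / p for p in hits])) if hits else 0.0
+
+print("mAP =", np.mean([average_precision(r) for r in results]))  # 约 0.67
+```
+
+### 4. 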
模型评估与推理部署 + +#### 4.1 模型评估 + +准备用于评估的`*.pdparams`模型参数文件,可以使用训练好的模型,也可以使用[2.1.4 模型训练](#214-模型训练)中保存的模型。 + +- 以训练过程中保存的`latest.pdparams`为例,执行如下命令即可进行评估。 + + ```shell + python3.7 tools/eval.py \ + -c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="./output/RecModel/latest" + ``` + +- 以训练好的模型为例,下载 [softmax_triplet_with_center_pretrained.pdparams](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_with_center_pretrained.pdparams) 到 `PaddleClas/pretrained_models` 文件夹中,执行如下命令即可进行评估。 + + ```shell + # 下载模型 + cd PaddleClas + mkdir pretrained_models + cd pretrained_models + wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/pretrain/softmax_triplet_with_center_pretrained.pdparams + cd .. + # 评估 + python3.7 tools/eval.py \ + -c ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="pretrained_models/softmax_triplet_with_center_pretrained" + ``` + 注:`pretrained_model` 后填入的地址不需要加 `.pdparams` 后缀,在程序运行时会自动补上。 + +- 查看输出结果 + ```log + ... + ... + ppcls INFO: unique_endpoints {''} + ppcls INFO: Found /root/.paddleclas/weights/resnet50-19c8e357_torch2paddle.pdparams + ppcls INFO: gallery feature calculation process: [0/125] + ppcls INFO: gallery feature calculation process: [20/125] + ppcls INFO: gallery feature calculation process: [40/125] + ppcls INFO: gallery feature calculation process: [60/125] + ppcls INFO: gallery feature calculation process: [80/125] + ppcls INFO: gallery feature calculation process: [100/125] + ppcls INFO: gallery feature calculation process: [120/125] + ppcls INFO: Build gallery done, all feat shape: [15913, 2048], begin to eval.. + ppcls INFO: query feature calculation process: [0/27] + ppcls INFO: query feature calculation process: [20/27] + ppcls INFO: Build query done, all feat shape: [3368, 2048], begin to eval.. + ppcls INFO: re_ranking=False + ppcls INFO: [Eval][Epoch 0][Avg]recall1: 0.94507, recall5: 0.98248, mAP: 0.85827 + ``` + 默认评估日志保存在`PaddleClas/output/RecModel/eval.log`中,可以看到我们提供的 `softmax_triplet_with_center_pretrained.pdparams` 模型在 Market1501 数据集上的评估指标为recall@1=0.94507,recall@5=0.98248,mAP=0.85827 + +- 使用re-ranking功能提升评估精度 + + re-ranking的主要思想是利用检索库之间的相互关系来进一步优化检索结果,比较广泛使用的是k-reciprocal算法。在PaddleClas中在评估时开启re-ranking来提升最终的检索精度。 + 如下所示,在评估命令中加上 `-o Global.re_ranking=True` 即可开启该功能。 + ```bash + python3.7 tools/eval.py \ + -c ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="pretrained_models/softmax_triplet_with_center_pretrained" \ + -o Global.re_ranking=True + ``` + 查看输出结果 + ```log + ... + ... + ppcls INFO: unique_endpoints {''} + ppcls INFO: Found /root/.paddleclas/weights/resnet50-19c8e357_torch2paddle.pdparams + ppcls INFO: gallery feature calculation process: [0/125] + ppcls INFO: gallery feature calculation process: [20/125] + ppcls INFO: gallery feature calculation process: [40/125] + ppcls INFO: gallery feature calculation process: [60/125] + ppcls INFO: gallery feature calculation process: [80/125] + ppcls INFO: gallery feature calculation process: [100/125] + ppcls INFO: gallery feature calculation process: [120/125] + ppcls INFO: Build gallery done, all feat shape: [15913, 2048], begin to eval.. + ppcls INFO: query feature calculation process: [0/27] + ppcls INFO: query feature calculation process: [20/27] + ppcls INFO: Build query done, all feat shape: [3368, 2048], begin to eval.. 
+ ppcls INFO: re_ranking=True + ppcls WARNING: re_ranking=True,Recallk.descending has been set to False + ppcls WARNING: re_ranking=True,mAP.descending has been set to False + ppcls INFO: using GPU to compute original distance + ppcls INFO: starting re_ranking + ppcls INFO: [Eval][Epoch 0][Avg]recall1: 0.95546, recall5: 0.97743, mAP: 0.94252 + ``` + 可以看到开启re-ranking后,评估指标为recall@1=0.95546,recall@5=0.97743,mAP=0.94252,可以发现该算法对mAP指标的提升比较明显(0.85827->0.94252)。 + + **注**:目前re-ranking的计算复杂度较高,因此默认不启用。 + +#### 4.2 模型推理 + +##### 4.2.1 推理模型准备 + +可以将训练过程中保存的模型文件转换成 inference 模型并推理,或者使用我们提供的转换好的 inference 模型直接进行推理 + - 将训练过程中保存的模型文件转换成 inference 模型,同样以 `latest.pdparams` 为例,执行以下命令进行转换 + ```shell + python3.7 tools/export_model.py \ + -c ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \ + -o Global.pretrained_model="output/RecModel/latest" \ + -o Global.save_inference_dir="./deploy/softmax_triplet_with_center_infer" + ``` + + - 或者下载并解压我们提供的 inference 模型 + ```shell + cd PaddleClas/deploy + wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/reid/inference/softmax_triplet_with_center_infer.tar + tar xf softmax_triplet_with_center_infer.tar + cd ../ + ``` + +##### 4.2.2 基于 Python 预测引擎推理 + + 1. 修改 `PaddleClas/deploy/configs/inference_rec.yaml` + - 将 `infer_imgs:` 后的路径段改为 Market1501 中 query 文件夹下的任意一张图片路径(下方配置使用的是`0294_c1s1_066631_00.jpg`图片的路径) + - 将 `rec_inference_model_dir:` 后的字段改为解压出来的 softmax_triplet_with_center_infer 文件夹路径 + - 将 `transform_ops:` 字段下的预处理配置改为 `softmax_triplet_with_center.yaml` 中`Eval.Query.dataset` 下的预处理配置 + + ```yaml + Global: + infer_imgs: "../dataset/market1501/Market-1501-v15.09.15/query/0294_c1s1_066631_00.jpg" + rec_inference_model_dir: "./softmax_triplet_with_center_infer" + batch_size: 1 + use_gpu: False + enable_mkldnn: True + cpu_num_threads: 10 + enable_benchmark: False + use_fp16: False + ir_optim: True + use_tensorrt: False + gpu_mem: 8000 + enable_profile: False + + RecPreProcess: + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + + RecPostProcess: null + ``` + + 2. 执行推理命令 + + ```shell + cd PaddleClas/deploy/ + python3.7 python/predict_rec.py -c ./configs/inference_rec.yaml + ``` + + 3. 查看输出结果,实际结果为一个长度2048的向量,表示输入图片经过模型转换后得到的特征向量 + + ```log + 0294_c1s1_066631_00.jpg: [ 0.01806974 0.00476423 -0.00508293 ... 0.03925538 0.00377574 + -0.00849029] + ``` + 推理时的输出向量储存在[predict_rec.py](../../../deploy/python/predict_rec.py#L134-L135)的`result_dict`变量中。 + + 4. 
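(补充示例)得到特征向量后,可以按如下方式示意性地计算 query 特征与多张底库特征的余弦相似度并排序。其中特征用随机数代替,实际使用时应替换为 `predict_rec.py` 对各图片的输出:
+
+      ```python
+      import numpy as np
+
+      # 假设 query 与 gallery 特征分别来自 predict_rec.py 的输出(此处用随机数示意)
+      query = np.random.rand(1, 2048).astype("float32")
+      gallery = np.random.rand(4, 2048).astype("float32")
+
+      # L2 归一化后,内积即余弦相似度
+      query /= np.linalg.norm(query, axis=1, keepdims=True)
+      gallery /= np.linalg.norm(gallery, axis=1, keepdims=True)
+      scores = (query @ gallery.T).flatten()
+      print("按相似度从高到低排序的底库图片序号:", np.argsort(-scores))
+      ```
+
+   4. 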
批量预测,将配置文件中`infer_imgs:`后的路径改为为文件夹即可,如`../dataset/market1501/Market-1501-v15.09.15/query`,会预测并逐个输出query下所有图片的特征向量。 + +##### 4.2.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考基于 Visual Studio 2019 Community CMake 编译指南完成相应的预测库编译和模型预测工作。 + +#### 4.3 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考Paddle Serving 代码仓库。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + +#### 4.4 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考Paddle Lite 代码仓库。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + +#### 4.5 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考Paddle2ONNX 代码仓库。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 + +### 5. 总结 + +#### 5.1 方法总结与对比 + +上述算法能快速地迁移至多数的ReID模型中,能进一步提升ReID模型的性能。 + +#### 5.2 使用建议/FAQ + +Market1501 数据集比较小,可以尝试训练多次取最高精度。 + +### 6. 参考资料 + +1. [Bag of Tricks and A Strong Baseline for Deep Person Re-identification](https://openaccess.thecvf.com/content_CVPRW_2019/papers/TRMTMCT/Luo_Bag_of_Tricks_and_a_Strong_Baseline_for_Deep_Person_CVPRW_2019_paper.pdf) +2. [michuanhaohao/reid-strong-baseline](https://github.com/michuanhaohao/reid-strong-baseline) +3. [行人重识别数据集之 Market1501 数据集_star_function的博客-CSDN博客_market1501数据集](https://blog.csdn.net/qq_39220334/article/details/121470106) +4. [Deep Learning for Person Re-identification:A Survey and Outlook](https://arxiv.org/abs/2001.04193) +5. [ReID任务中的CMC和mAP](https://wrong.wang/blog/20190223-reid%E4%BB%BB%E5%8A%A1%E4%B8%AD%E7%9A%84cmc%E5%92%8Cmap/) diff --git a/docs/zh_CN/faq_series/faq_2022_s1.md b/docs/zh_CN/faq_series/faq_2022_s1.md new file mode 100644 index 0000000000000000000000000000000000000000..07cbdf3c8ba984f5c5e76acf461b4b5d9a2b949c --- /dev/null +++ b/docs/zh_CN/faq_series/faq_2022_s1.md @@ -0,0 +1,57 @@ +# PaddleClas 相关常见问题汇总 - 2022 第 1 季 + +## 写在前面 + +* 我们收集整理了开源以来在 issues 和用户群中的常见问题并且给出了简要解答,旨在为广大用户提供一些参考,也希望帮助大家少走一些弯路。 + +* 图像分类、识别、检索领域大佬众多,模型和论文更新速度也很快,本文档回答主要依赖有限的项目实践,难免挂一漏万,如有遗漏和不足,也希望有识之士帮忙补充和修正,万分感谢。 + +## 目录 + +- [1. 理论篇](#1-理论篇) +- [2. 实战篇](#2-实战篇) + - [2.1 训练与评估共性问题](#21-训练与评估共性问题) + - [Q2.1.1 如何在训练时冻结某些层的参数?](#q211-如何在训练时冻结某些层的参数) + + +## 1. 理论篇 + + +## 2. 实战篇 + + +### 2.1 训练与评估共性问题 + +#### Q2.1.1 如何在训练时冻结某些层的参数? +**A**:目前有三种方法可以使用 +1. 手动修改模型代码,使用`paddle.ParamAttr(learning_rate=0.0)`,将冻结层的学习率设置为0.0,具体用法可以查看[paddle.ParamAttr文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/ParamAttr_cn.html#paramattr)。如下代码可以将self.conv层的weight参数学习率设置为0.0。 + ```python + self.conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(learning_rate=0.0), # <--在这里设置 + bias_attr=False, + data_format=data_format) + ``` + +2. 手动设置冻结层的stop_gradient=True,可参考[此链接](https://github.com/RainFrost1/PaddleClas/blob/24e968b8d9f7d9e2309e713cbf2afe8fda9deacd/ppcls/engine/train/train_idml.py#L40-L66)。使用此方法后,梯度回传到strop_gradient的层之后,停止反向回传,即之前的层的权重也会被固定。 + +3. 
在 loss.backward() 之后、optimizer.step() 之前,使用 nn.Layer 的 clear_gradients() 方法(或 paddle.Tensor 的 clear_grad() 方法)。对要固定的层或参数调用此方法,不会影响 loss 回传。如下代码可以清空某一层的梯度,或者某一层中某个参数张量的梯度:
+    ```python
+    import paddle
+    linear = paddle.nn.Linear(3, 4)
+    x = paddle.randn([4, 3])
+    y = linear(x)
+    loss = y.sum()
+    loss.backward()
+
+    print(linear.weight.grad)
+    print(linear.bias.grad)
+    linear.clear_gradients() # 清空整个Linear层的梯度,包括linear.weight和linear.bias
+    # linear.weight.clear_grad() # 只清空Linear.weight的梯度
+    print(linear.weight.grad)
+    print(linear.bias.grad)
+    ```
diff --git a/docs/zh_CN/image_recognition_pipeline/feature_extraction.md b/docs/zh_CN/image_recognition_pipeline/feature_extraction.md
index 1438e9661200ede1adf67cf6813f763c3a13c095..368abc3da9856c8d9232819aef3b43f0ef66735d 100644
--- a/docs/zh_CN/image_recognition_pipeline/feature_extraction.md
+++ b/docs/zh_CN/image_recognition_pipeline/feature_extraction.md
@@ -1,182 +1,247 @@
+简体中文|[English](../../en/image_recognition_pipeline/feature_extraction_en.md)
 # 特征提取
 
 ## 目录
 
-- [1. 简介](#1)
-- [2. 网络结构](#2)
-- [3. 通用识别模型](#3)
-- [4. 自定义特征提取](#4)
-  - [4.1 数据准备](#4.1)
-  - [4.2 模型训练](#4.2)
-  - [4.3 模型评估](#4.3)
-  - [4.4 模型推理](#4.4)
-    - [4.4.1 导出推理模型](#4.4.1)
-    - [4.4.2 获取特征向量](#4.4.2)
+- [1. 摘要](#1-摘要)
+- [2. 介绍](#2-介绍)
+- [3. 方法](#3-方法)
+  - [3.1 Backbone](#31-backbone)
+  - [3.2 Neck](#32-neck)
+  - [3.3 Head](#33-head)
+  - [3.4 Loss](#34-loss)
+- [4. 实验部分](#4-实验部分)
+- [5. 自定义特征提取](#5-自定义特征提取)
+  - [5.1 数据准备](#51-数据准备)
+  - [5.2 模型训练](#52-模型训练)
+  - [5.3 模型评估](#53-模型评估)
+  - [5.4 模型推理](#54-模型推理)
+    - [5.4.1 导出推理模型](#541-导出推理模型)
+    - [5.4.2 获取特征向量](#542-获取特征向量)
+- [6. 总结](#6-总结)
+- [7. 参考文献](#7-参考文献)
 
 
 
-## 1. 简介
+## 1. 摘要
 
-特征提取是图像识别中的关键一环,它的作用是将输入的图片转化为固定维度的特征向量,用于后续的[向量检索](./vector_search.md)。好的特征需要具备相似度保持性,即在特征空间中,相似度高的图片对其特征相似度要比较高(距离比较近),相似度低的图片对,其特征相似度要比较小(距离比较远)。[Deep Metric Learning](../algorithm_introduction/metric_learning.md)用以研究如何通过深度学习的方法获得具有强表征能力的特征。
+特征提取是图像识别中的关键一环,它的作用是将输入的图片转化为固定维度的特征向量,用于后续的[向量检索](./vector_search.md)。一个好的特征需要具备"相似度保持性",即相似度高的图片对,其特征的相似度也比较高(特征空间中的距离比较近),相似度低的图片对,其特征相似度要比较低(特征空间中的距离比较远)。为此[Deep Metric Learning](../algorithm_introduction/metric_learning.md)领域内提出了不少方法用以研究如何通过深度学习来获得具有强表征能力的特征。
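
+下面用一个极简的数值例子来直观感受"相似度保持性":相似图片对的特征夹角更小、余弦相似度更高。示例中的特征向量为手工构造的示意数据,并非真实模型输出:
+
+```python
+import numpy as np
+
+def cosine_sim(a, b):
+    """余弦相似度:值越接近 1,表示两个特征越相似"""
+    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
+
+anchor    = np.array([0.8, 0.1, 0.1])   # 某张图片的特征(示意)
+similar   = np.array([0.75, 0.2, 0.05]) # 与之相似的图片的特征
+different = np.array([0.05, 0.9, 0.4])  # 与之不相似的图片的特征
+
+print(cosine_sim(anchor, similar))    # 输出约 0.99,特征空间中距离较近
+print(cosine_sim(anchor, different))  # 输出约 0.21,特征空间中距离较远
+```
 
 

-## 2. 网络结构
+## 2. 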
介绍 + 为了图像识别任务的灵活定制,我们将整个网络分为 Backbone、 Neck、 Head 以及 Loss 部分,整体结构如下图所示: ![](../../images/feature_extraction_framework.png) 图中各个模块的功能为: -- **Backbone**: 指定所使用的骨干网络。 值得注意的是,PaddleClas 提供的基于 ImageNet 的预训练模型,最后一层的输出为 1000,我们需要依据所需的特征维度定制最后一层的输出。 -- **Neck**: 用以特征增强及特征维度变换。这儿的 Neck,可以是一个简单的 Linear Layer,用来做特征维度变换;也可以是较复杂的 FPN 结构,用以做特征增强。 -- **Head**: 用来将 feature 转化为 logits。除了常用的 Fc Layer 外,还可以替换为 cosmargin, arcmargin, circlemargin 等模块。 -- **Loss**: 指定所使用的 Loss 函数。我们将 Loss 设计为组合 loss 的形式,可以方便地将 Classification Loss 和 Pair_wise Loss 组合在一起。 +- **Backbone**: 用于提取输入图像初步特征的骨干网络,一般由配置文件中的 [`Backbone`](../../../ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml#L26-L29) 以及 [`BackboneStopLayer`](../../../ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml#L30-L31) 字段共同指定。 +- **Neck**: 用以特征增强及特征维度变换。可以是一个简单的 FC Layer,用来做特征维度变换;也可以是较复杂的 FPN 结构,用以做特征增强,一般由配置文件中的 [`Neck`](../../../ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml#L32-L35)字段指定。 +- **Head**: 用来将 feature 转化为 logits,让模型在训练阶段能以分类任务的形式进行训练。除了常用的 FC Layer 外,还可以替换为 cosmargin, arcmargin, circlemargin 等模块,一般由配置文件中的 [`Head`](../../../ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml#L36-L41)字段指定。 +- **Loss**: 指定所使用的 Loss 函数。我们将 Loss 设计为组合 loss 的形式,可以方便地将 Classification Loss 和 Metric learning Loss 组合在一起,一般由配置文件中的 [`Loss`](../../../ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml#L44-L50)字段指定。 -## 3. 通用识别模型 +## 3. 方法 + +### 3.1 Backbone + +Backbone 部分采用了 [PP_LCNet_x2_5](../models/PP-LCNet.md),其针对Intel CPU端的性能优化探索了多个有效的结构设计方案,最终实现了在不增加推理时间的情况下,进一步提升模型的性能,最终大幅度超越现有的 SOTA 模型。 + +### 3.2 Neck + +Neck 部分采用了 [FC Layer](../../../ppcls/arch/gears/fc.py),对 Backbone 抽取得到的特征进行降维,减少了特征存储的成本与计算量。 + +### 3.3 Head + +Head 部分选用 [ArcMargin](../../../ppcls/arch/gears/arcmargin.py),在训练时通过指定margin,增大同类特征之间的角度差异再进行分类,进一步提升抽取特征的表征能力。 -在 PP-Shitu 中, 我们采用 [PP_LCNet_x2_5](../models/PP-LCNet.md) 作为骨干网络 Neck 部分选用 Linear Layer, Head 部分选用 [ArcMargin](../../../ppcls/arch/gears/arcmargin.py),Loss 部分选用 CELoss,详细的配置文件见[通用识别配置文件](../../../ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml)。其中,训练数据为如下 7 个公开数据集的汇总: +### 3.4 Loss -| 数据集 | 数据量 | 类别数 | 场景 | 数据集地址 | -| :------------: | :-------------: | :-------: | :-------: | :--------: | -| Aliproduct | 2498771 | 50030 | 商品 | [地址](https://retailvisionworkshop.github.io/recognition_challenge_2020/) | -| GLDv2 | 1580470 | 81313 | 地标 | [地址](https://github.com/cvdfoundation/google-landmark) | -| VeRI-Wild | 277797 | 30671 | 车辆 | [地址](https://github.com/PKU-IMRE/VERI-Wild)| -| LogoDet-3K | 155427 | 3000 | Logo | [地址](https://github.com/Wangjing1551/LogoDet-3K-Dataset) | -| iCartoonFace | 389678 | 5013 | 动漫人物 | [地址](http://challenge.ai.iqiyi.com/detail?raceId=5def69ace9fcf68aef76a75d) | -| SOP | 59551 | 11318 | 商品 | [地址](https://cvgl.stanford.edu/projects/lifted_struct/) | -| Inshop | 25882 | 3997 | 商品 | [地址](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion.html) | -| **Total** | **5M** | **185K** | ---- | ---- | +Loss 部分选用 [Cross entropy loss](../../../ppcls/loss/celoss.py),在训练时以分类任务的损失函数来指导网络进行优化。详细的配置文件见[通用识别配置文件](../../../ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml)。 + + + +## 4. 
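
+为了更直观地理解 Head 中 margin 的作用,下面给出 ArcMargin 核心步骤(对目标类别的夹角加上 margin 后再整体放大)的 numpy 示意实现。该实现与 PaddleClas 源码并不完全一致,仅用于说明思想:
+
+```python
+import numpy as np
+
+def arcmargin_logits(feat, weight, label, s=30.0, m=0.5):
+    """对目标类别的余弦相似度施加角度 margin(cos(theta + m)),再放大 s 倍"""
+    feat = feat / np.linalg.norm(feat, axis=1, keepdims=True)
+    weight = weight / np.linalg.norm(weight, axis=1, keepdims=True)
+    cos = feat @ weight.T  # [N, class_num],特征与各类别权重的夹角余弦
+    rows = np.arange(len(label))
+    theta = np.arccos(np.clip(cos[rows, label], -1.0, 1.0))
+    cos[rows, label] = np.cos(theta + m)  # 仅对正确类别增大角度差异,加大训练难度
+    return s * cos
+```
+
+## 4. 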
实验部分 + +训练数据为如下 7 个公开数据集的汇总: + +| 数据集 | 数据量 | 类别数 | 场景 | 数据集地址 | +| :----------: | :-----: | :------: | :------: | :--------------------------------------------------------------------------: | +| Aliproduct | 2498771 | 50030 | 商品 | [地址](https://retailvisionworkshop.github.io/recognition_challenge_2020/) | +| GLDv2 | 1580470 | 81313 | 地标 | [地址](https://github.com/cvdfoundation/google-landmark) | +| VeRI-Wild | 277797 | 30671 | 车辆 | [地址](https://github.com/PKU-IMRE/VERI-Wild) | +| LogoDet-3K | 155427 | 3000 | Logo | [地址](https://github.com/Wangjing1551/LogoDet-3K-Dataset) | +| iCartoonFace | 389678 | 5013 | 动漫人物 | [地址](http://challenge.ai.iqiyi.com/detail?raceId=5def69ace9fcf68aef76a75d) | +| SOP | 59551 | 11318 | 商品 | [地址](https://cvgl.stanford.edu/projects/lifted_struct/) | +| Inshop | 25882 | 3997 | 商品 | [地址](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion.html) | +| **Total** | **5M** | **185K** | ---- | ---- | 最终的模型效果如下表所示: -| 模型 | Aliproduct | VeRI-Wild | LogoDet-3K | iCartoonFace | SOP | Inshop | Latency(ms) | -| :----------: | :---------: | :-------: | :-------: | :--------: | :--------: | :--------: | :--------: | -PP-LCNet-2.5x | 0.839 | 0.888 | 0.861 | 0.841 | 0.793 | 0.892 | 5.0 +| 模型 | Aliproduct | VeRI-Wild | LogoDet-3K | iCartoonFace | SOP | Inshop | Latency(ms) | +| :-----------------------------: | :--------: | :-------: | :--------: | :----------: | :---: | :----: | :---------: | +| GeneralRecognition_PPLCNet_x2_5 | 0.839 | 0.888 | 0.861 | 0.841 | 0.793 | 0.892 | 5.0 | +* 预训练模型地址:[通用识别预训练模型](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams) * 采用的评测指标为:`Recall@1` * 速度评测机器的 CPU 具体信息为:`Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz` * 速度指标的评测条件为: 开启 MKLDNN, 线程数设置为 10 -* 预训练模型地址:[通用识别预训练模型](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams) - + -## 4. 自定义特征提取 +## 5. 自定义特征提取 -自定义特征提取,是指依据自己的任务,重新训练特征提取模型。主要包含四个步骤:1)数据准备;2)模型训练;3)模型评估;4)模型推理。 +自定义特征提取,是指依据自己的任务,重新训练特征提取模型。 - +下面基于`GeneralRecognition_PPLCNet_x2_5.yaml`配置文件,介绍主要的四个步骤:1)数据准备;2)模型训练;3)模型评估;4)模型推理 -### 4.1 数据准备 -首先,需要基于任务定制自己的数据集。数据集格式参见[格式说明](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/data_preparation/recognition_dataset.md#%E6%95%B0%E6%8D%AE%E9%9B%86%E6%A0%BC%E5%BC%8F%E8%AF%B4%E6%98%8E)。在启动模型训练之前,需要在配置文件中修改数据配置相关的内容, 主要包括数据集的地址以及类别数量。对应到配置文件中的位置如下所示: -``` - Head: - name: ArcMargin - embedding_size: 512 - class_num: 185341 #此处表示类别数 -``` -``` - Train: - dataset: - name: ImageNetDataset - image_root: ./dataset/ #此处表示train数据所在的目录 - cls_label_path: ./dataset/train_reg_all_data.txt #此处表示train数据集label文件的地址 -``` -``` - Query: - dataset: - name: VeriWild - image_root: ./dataset/Aliproduct/. #此处表示query数据集所在的目录 - cls_label_path: ./dataset/Aliproduct/val_list.txt. #此处表示query数据集label文件的地址 -``` -``` - Gallery: - dataset: - name: VeriWild - image_root: ./dataset/Aliproduct/ #此处表示gallery数据集所在的目录 - cls_label_path: ./dataset/Aliproduct/val_list.txt. 
#此处表示gallery数据集label文件的地址 -``` - - + -### 4.2 模型训练 +### 5.1 数据准备 -- 单机单卡训练 -```shell -export CUDA_VISIBLE_DEVICES=0 -python tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -``` -- 单机多卡训练 -```shell -export CUDA_VISIBLE_DEVICES=0,1,2,3 -python -m paddle.distributed.launch \ - --gpus="0,1,2,3" tools/train.py \ - -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -``` -**注意:** -配置文件中默认采用`在线评估`的方式,如果你想加快训练速度,去除`在线评估`,只需要在上述命令后面,增加 `-o eval_during_train=False`。训练完毕后,在 output 目录下会生成最终模型文件 `latest`,`best_model` 和训练日志文件 `train.log`。其中,`best_model` 用来存储当前评测指标下的最佳模型;`latest` 用来存储最新生成的模型, 方便在任务中断的情况下从断点位置启动训练。 +首先需要基于任务定制自己的数据集。数据集格式与文件结构详见[数据集格式说明](../data_preparation/recognition_dataset.md)。 -- 断点续训: -```shell -export CUDA_VISIBLE_DEVICES=0,1,2,3 -python -m paddle.distributed.launch \ - --gpus="0,1,2,3" tools/train.py \ - -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ - -o Global.checkpoint="output/RecModel/latest" -``` +准备完毕之后还需要在配置文件中修改数据配置相关的内容, 主要包括数据集的地址以及类别数量。对应到配置文件中的位置如下所示: - +- 修改类别数: + ```yaml + Head: + name: ArcMargin + embedding_size: 512 + class_num: 185341 # 此处表示类别数 + ``` +- 修改训练数据集配置: + ```yaml + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ # 此处表示train数据所在的目录 + cls_label_path: ./dataset/train_reg_all_data.txt # 此处表示train数据集label文件的地址 + ``` +- 修改评估数据集中query数据配置: + ```yaml + Query: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ # 此处表示query数据集所在的目录 + cls_label_path: ./dataset/Aliproduct/val_list.txt # 此处表示query数据集label文件的地址 + ``` +- 修改评估数据集中gallery数据配置: + ```yaml + Gallery: + dataset: + name: VeriWild + image_root: ./dataset/Aliproduct/ # 此处表示gallery数据集所在的目录 + cls_label_path: ./dataset/Aliproduct/val_list.txt # 此处表示gallery数据集label文件的地址 + ``` + + + +### 5.2 模型训练 + +模型训练主要包括启动训练和断点恢复训练的功能 -### 4.3 模型评估 +- 单机单卡训练 + ```shell + export CUDA_VISIBLE_DEVICES=0 + python3.7 tools/train.py \ + -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml + ``` +- 单机多卡训练 + ```shell + export CUDA_VISIBLE_DEVICES=0,1,2,3 + python3.7 -m paddle.distributed.launch \ + --gpus="0,1,2,3" tools/train.py \ + -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml + ``` +**注意:** +配置文件中默认采用`在线评估`的方式,如果你想加快训练速度,可以关闭`在线评估`功能,只需要在上述命令的后面,增加 `-o Global.eval_during_train=False`。 + +训练完毕后,在 output 目录下会生成最终模型文件 `latest.pdparams`,`best_model.pdarams` 和训练日志文件 `train.log`。其中,`best_model` 保存了当前评测指标下的最佳模型,`latest` 用来保存最新生成的模型, 方便在任务中断的情况下从断点位置恢复训练。通过在上述训练命令的末尾加上`-o Global.checkpoint="path_to_resume_checkpoint"`即可从断点恢复训练,示例如下。 + +- 单机单卡断点恢复训练 + ```shell + export CUDA_VISIBLE_DEVICES=0 + python3.7 tools/train.py \ + -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ + -o Global.checkpoint="output/RecModel/latest" + ``` +- 单机多卡断点恢复训练 + ```shell + export CUDA_VISIBLE_DEVICES=0,1,2,3 + python3.7 -m paddle.distributed.launch \ + --gpus="0,1,2,3" tools/train.py \ + -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ + -o Global.checkpoint="output/RecModel/latest" + ``` + + + +### 5.3 模型评估 + +除了训练过程中对模型进行的在线评估,也可以手动启动评估程序来获得指定的模型的精度指标。 - 单卡评估 -```shell -export CUDA_VISIBLE_DEVICES=0 -python tools/eval.py \ --c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ --o Global.pretrained_model="output/RecModel/best_model" -``` + ```shell + export CUDA_VISIBLE_DEVICES=0 + python3.7 tools/eval.py \ + -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ + -o 
Global.pretrained_model="output/RecModel/best_model" + ``` - 多卡评估 -```shell -export CUDA_VISIBLE_DEVICES=0,1,2,3 -python -m paddle.distributed.launch \ - --gpus="0,1,2,3" tools/eval.py \ - -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ - -o Global.pretrained_model="output/RecModel/best_model" -``` -**推荐:** 建议使用多卡评估。多卡评估方式可以利用多卡并行计算快速得到整体数据集的特征集合,能够加速评估的过程。 + ```shell + export CUDA_VISIBLE_DEVICES=0,1,2,3 + python3.7 -m paddle.distributed.launch \ + --gpus="0,1,2,3" tools/eval.py \ + -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ + -o Global.pretrained_model="output/RecModel/best_model" + ``` +**注:** 建议使用多卡评估。该方式可以利用多卡并行计算快速得到全部数据的特征,能够加速评估的过程。 - + -### 4.4 模型推理 +### 5.4 模型推理 -推理过程包括两个步骤: 1)导出推理模型; 2)获取特征向量 +推理过程包括两个步骤: 1)导出推理模型;2)模型推理以获取特征向量 - +#### 5.4.1 导出推理模型 -#### 4.4.1 导出推理模型 - -``` -python tools/export_model.py \ +首先需要将 `*.pdparams` 模型文件转换成 inference 格式,转换命令如下。 +```shell +python3.7 tools/export_model.py \ -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml \ -o Global.pretrained_model="output/RecModel/best_model" ``` -生成的推理模型位于 `inference` 目录,里面包含三个文件,分别为 `inference.pdmodel`、`inference.pdiparams`、`inference.pdiparams.info`。 -其中: `inference.pdmodel` 用来存储推理模型的结构, `inference.pdiparams` 和 `inference.pdiparams.info` 用来存储推理模型相关的参数信息。 +生成的推理模型默认位于 `PaddleClas/inference` 目录,里面包含三个文件,分别为 `inference.pdmodel`、`inference.pdiparams`、`inference.pdiparams.info`。 +其中`inference.pdmodel` 用来存储推理模型的结构, `inference.pdiparams` 和 `inference.pdiparams.info` 用来存储推理模型相关的参数信息。 - +#### 5.4.2 获取特征向量 -#### 4.4.2 获取特征向量 +使用上一步转换得到的 inference 格式模型,将输入图片转换为对应的特征向量,推理命令如下。 -``` +```shell cd deploy -python python/predict_rec.py \ +python3.7 python/predict_rec.py \ -c configs/inference_rec.yaml \ -o Global.rec_inference_model_dir="../inference" ``` 得到的特征输出格式如下图所示: ![](../../images/feature_extraction_output.png) -在实际使用过程中,单纯得到特征往往并不能够满足业务的需求。如果想进一步通过特征检索来进行图像识别,可以参照文档[向量检索](./vector_search.md)。 +在实际使用过程中,仅仅得到特征可能并不能满足业务需求。如果想进一步通过特征检索来进行图像识别,可以参照文档[向量检索](./vector_search.md)。 + + + +## 6. 总结 + +特征提取模块作为图像识别中的关键一环,在网络结构的设计,损失函数的选取上有很大的改进空间。不同的数据集类型有各自不同的特点,如行人重识别、商品识别、人脸识别数据集的分布、图片内容都不尽相同。学术界根据这些特点提出了各种各样的方法,如PCB、MGN、ArcFace、CircleLoss、TripletLoss等,围绕的还是增大类间差异、减少类内差异的最终目标,从而有效地应对各种真实场景数据。 + + + +## 7. 参考文献 + +1. [PP-LCNet: A Lightweight CPU Convolutional Neural Network](https://arxiv.org/pdf/2109.15099.pdf) +2. [ArcFace: Additive Angular Margin Loss for Deep Face Recognition](https://arxiv.org/abs/1801.07698) diff --git a/docs/zh_CN/image_recognition_pipeline/mainbody_detection.md b/docs/zh_CN/image_recognition_pipeline/mainbody_detection.md index f3d7989029ae763523cebf3d504920863b356adc..828fdf4f1f017d524aa9ebea1f1a409dee0eaf43 100644 --- a/docs/zh_CN/image_recognition_pipeline/mainbody_detection.md +++ b/docs/zh_CN/image_recognition_pipeline/mainbody_detection.md @@ -19,9 +19,13 @@ - [3.3 配置文件改动和说明](#3.3) - [3.4 启动训练](#3.4) - [3.5 模型预测与调试](#3.5) - - [3.6 模型导出与预测部署](#3.6) +- [4. 模型推理部署](#4) + - [4.1 推理模型准备](#4.1) + - [4.2 基于python预测引擎推理](#4.2) + - [4.3 其他推理方式](#4.3) - + + ## 1. 数据集 @@ -37,7 +41,7 @@ 在实际训练的过程中,将所有数据集混合在一起。由于是主体检测,这里将所有标注出的检测框对应的类别都修改为 `前景` 的类别,最终融合的数据集中只包含 1 个类别,即前景。 - + ## 2. 
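
+下面给出"将所有标注类别统一映射为前景"这一步骤的示意脚本。脚本假设标注为 COCO 格式的 json 文件,路径仅为示例,且并非 PaddleDetection 提供的官方工具,仅供参考:
+
+```python
+import json
+
+anno_path = "dataset/annotations/train.json"  # 假设的 COCO 格式标注文件路径
+with open(anno_path, "r") as f:
+    coco = json.load(f)
+
+# 类别表只保留一个"前景"类,并把所有标注框的类别都改为该类
+coco["categories"] = [{"id": 1, "name": "foreground", "supercategory": "foreground"}]
+for ann in coco["annotations"]:
+    ann["category_id"] = 1
+
+with open(anno_path.replace(".json", "_foreground.json"), "w") as f:
+    json.dump(coco, f)
+```
+
 ## 2. 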
模型选择 @@ -55,7 +59,7 @@ * 速度评测机器的 CPU 具体信息为:`Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz`,速度指标为开启 mkldnn,线程数设置为 10 测试得到。 * 主体检测的预处理过程较为耗时,平均每张图在上述机器上的时间在 40~55 ms 左右,没有包含在上述的预测耗时统计中。 - + ### 2.1 轻量级主体检测模型 @@ -72,7 +76,7 @@ PicoDet 由 [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection) 在轻量级主体检测任务中,为了更好地兼顾检测速度与效果,我们使用 PPLCNet_x2_5 作为主体检测模型的骨干网络,同时将训练与预测的图像尺度修改为了 640x640,其余配置与 [picodet_lcnet_1_5x_416_coco.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/picodet/more_config/picodet_lcnet_1_5x_416_coco.yml) 完全一致。将数据集更换为自定义的主体检测数据集,进行训练,最终得到检测模型。 - + ### 2.2 服务端主体检测模型 @@ -93,13 +97,13 @@ PP-YOLO 由 [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection) 在服务端主体检测任务中,为了保证检测效果,我们使用 ResNet50vd-DCN 作为检测模型的骨干网络,使用配置文件 [ppyolov2_r50vd_dcn_365e_coco.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml),更换为自定义的主体检测数据集,进行训练,最终得到检测模型。 - + ## 3. 模型训练 本节主要介绍怎样基于 PaddleDetection,基于自己的数据集,训练主体检测模型。 - + ### 3.1 环境准备 @@ -116,7 +120,7 @@ pip install -r requirements.txt 更多安装教程,请参考: [安装文档](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md) - + ### 3.2 数据准备 @@ -128,7 +132,7 @@ pip install -r requirements.txt [{u'id': 1, u'name': u'foreground', u'supercategory': u'foreground'}] ``` - + ### 3.3 配置文件改动和说明 @@ -154,7 +158,7 @@ ppyolov2_reader.yml:主要说明数据读取器配置,如 batch size,并 此外,也可以根据实际情况,修改上述文件,比如,如果显存溢出,可以将 batch size 和学习率等比缩小等。 - + ### 3.4 启动训练 @@ -198,7 +202,7 @@ python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppy 注意:如果遇到 "`Out of memory error`" 问题, 尝试在 `ppyolov2_reader.yml` 文件中调小 `batch_size`,同时等比例调小学习率。 - + ### 3.5 模型预测与调试 @@ -211,9 +215,11 @@ python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer `--draw_threshold` 是个可选参数. 根据 [NMS](https://ieeexplore.ieee.org/document/1699659) 的计算,不同阈值会产生不同的结果 `keep_top_k` 表示设置输出目标的最大数量,默认值为 100,用户可以根据自己的实际情况进行设定。 - + +## 4. 
模型推理部署 -### 3.6 模型导出与预测部署。 + +### 4.1 推理模型准备 执行导出模型脚本: @@ -225,15 +231,21 @@ python tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml 注意: `PaddleDetection` 导出的 inference 模型的文件格式为 `model.xxx`,这里如果希望与 PaddleClas 的 inference 模型文件格式保持一致,需要将其 `model.xxx` 文件修改为 `inference.xxx` 文件,用于后续主体检测的预测部署。 -更多模型导出教程,请参考: [EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md) +更多模型导出教程,请参考: [EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/deploy/EXPORT_MODEL.md) 最终,目录 `inference/ppyolov2_r50vd_dcn_365e_coco` 中包含 `inference.pdiparams`, `inference.pdiparams.info` 以及 `inference.pdmodel` 文件,其中 `inference.pdiparams` 为保存的 inference 模型权重文件,`inference.pdmodel` 为保存的 inference 模型结构文件。 + +### 4.2 基于python预测引擎推理 导出模型之后,在主体检测与识别任务中,就可以将检测模型的路径更改为该 inference 模型路径,完成预测。 以商品识别为例,其配置文件为 [inference_product.yaml](../../../deploy/configs/inference_product.yaml),修改其中的 `Global.det_inference_model_dir` 字段为导出的主体检测 inference 模型目录,参考[图像识别快速开始教程](../quick_start/quick_start_recognition.md),即可完成商品检测与识别过程。 + +### 4.3 其他推理方式 +其他推理方法,如C++推理部署、PaddleServing部署等请参考[检测模型推理部署](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/deploy/README.md)。 + ### FAQ diff --git a/docs/zh_CN/image_recognition_pipeline/vector_search.md b/docs/zh_CN/image_recognition_pipeline/vector_search.md index 6cf4d207ddfa5f3cade2ac727b12df2038f3943c..be0bf785c9b4844a9e6d2ae744ceb37c5ddbfed7 100644 --- a/docs/zh_CN/image_recognition_pipeline/vector_search.md +++ b/docs/zh_CN/image_recognition_pipeline/vector_search.md @@ -1,5 +1,21 @@ # 向量检索 +## 目录 + +- [1. 向量检索应用场景介绍](#1) +- [2. 向量检索算法介绍](#2) + - [2.1 HNSW](#2.1) + - [2.2 IVF](#2.2) + - [2.3 FLAT](#2.3) +- [3. 检索库安装](#3) +- [4. 使用及配置文档介绍](#4) + - [4.1 建库及配置文件参数](#4.1) + - [4.2 检索配置文件参数](#4.2) + + + +## 1. 向量检索应用场景介绍 + 向量检索技术在图像识别、图像检索中应用比较广泛。其主要目标是,对于给定的查询向量,在已经建立好的向量库中,与库中所有的待查询向量,进行特征向量的相似度或距离计算,得到相似度排序。在图像识别系统中,我们使用 [Faiss](https://github.com/facebookresearch/faiss) 对此部分进行支持,具体信息请详查 [Faiss 官网](https://github.com/facebookresearch/faiss)。`Faiss` 主要有以下优势 - 适配性好:支持 Windos、Linux、MacOS 系统 @@ -20,17 +36,33 @@ -------------------------- -## 目录 + +## 2. 使用的检索算法 + +目前 `PaddleClas` 中检索模块,支持三种检索算法**HNSW32**、**IVF**、**FLAT**。每种检索算法,满足不同场景。其中 `HNSW32` 为默认方法,此方法的检索精度、检索速度可以取得一个较好的平衡,具体算法介绍可以查看[官方文档](https://github.com/facebookresearch/faiss/wiki)。 + + +### 2.1 HNSW方法 + +此方法为图索引方法,如下图所示,在建立索引的时候,分为不同的层,所以检索精度较高,速度较快,但是特征库只支持添加图像功能,不支持删除图像特征功能。基于图的向量检索算法在向量检索的评测中性能都是比较优异的。如果比较在乎检索算法的效率,而且可以容忍一定的空间成本,多数场景下比较推荐基于图的检索算法。而HNSW是一种典型的,应用广泛的图算法,很多分布式检索引擎都对HNSW算法进行了分布式改造,以应用于高并发,大数据量的线上查询。此方法为默认方法。 +
+ +
+ + +### 2.2 IVF -- [1. 检索库安装](#1) -- [2. 使用的检索算法](#2) -- [3. 使用及配置文档介绍](#3) - - [3.1 建库及配置文件参数](#3.1) - - [3.2 检索配置文件参数](#3.2) +一种倒排索引检索方法。速度较快,但是精度略低。特征库支持增加、删除图像特征功能。IVF主要利用倒排的思想保存每个聚类中心下的向量,每次查询向量的时候找到最近的几个中心,分别搜索这几个中心下的向量。通过减小搜索范围,大大提升搜索效率。 - + +### 2.3 FLAT -## 1. 检索库安装 +暴力检索算法。精度最高,但是数据量大时,检索速度较慢。特征库支持增加、删除图像特征功能。 + + + + +## 3. 检索库安装 `Faiss` 具体安装方法如下: @@ -40,27 +72,16 @@ pip install faiss-cpu==1.7.1post2 若使用时,不能正常引用,则 `uninstall` 之后,重新 `install`,尤其是 `windows` 下。 - - -## 2. 使用的检索算法 - -目前 `PaddleClas` 中检索模块,支持如下三种检索算法 - -- **HNSW32**: 一种图索引方法。检索精度较高,速度较快。但是特征库只支持添加图像功能,不支持删除图像特征功能。(默认方法) -- **IVF**:倒排索引检索方法。速度较快,但是精度略低。特征库支持增加、删除图像特征功能。 -- **FLAT**: 暴力检索算法。精度最高,但是数据量大时,检索速度较慢。特征库支持增加、删除图像特征功能。 - -每种检索算法,满足不同场景。其中 `HNSW32` 为默认方法,此方法的检索精度、检索速度可以取得一个较好的平衡,具体算法介绍可以查看[官方文档](https://github.com/facebookresearch/faiss/wiki)。 - + -## 3. 使用及配置文档介绍 +## 4. 使用及配置文档介绍 -涉及检索模块配置文件位于:`deploy/configs/` 下,其中 `build_*.yaml` 是建立特征库的相关配置文件,`inference_*.yaml` 是检索或者分类的推理配置文件。 +涉及检索模块配置文件位于:`deploy/configs/` 下,其中 `inference_*.yaml` 是检索或者分类的推理配置文件,同时也是建立特征库的相关配置文件。 - + -### 3.1 建库及配置文件参数 +### 4.1 建库及配置文件参数 建库的具体操作如下: @@ -68,14 +89,14 @@ pip install faiss-cpu==1.7.1post2 # 进入 deploy 目录 cd deploy # yaml 文件根据需要改成自己所需的具体 yaml 文件 -python python/build_gallery.py -c configs/build_***.yaml +python python/build_gallery.py -c configs/inference_***.yaml ``` 其中 `yaml` 文件的建库的配置如下,在运行时,请根据实际情况进行修改。建库操作会将根据 `data_file` 的图像列表,将 `image_root` 下的图像进行特征提取,并在 `index_dir` 下进行存储,以待后续检索使用。 其中 `data_file` 文件存储的是图像文件的路径和标签,每一行的格式为:`image_path label`。中间间隔以 `yaml` 文件中 `delimiter` 参数作为间隔。 -关于特征提取的具体模型参数,可查看 `yaml` 文件。 +关于特征提取的具体模型参数,可查看 `yaml` 文件。注意下面的配置参数只列举了建立索引库相关部分。 ```yaml # indexing engine config @@ -88,6 +109,7 @@ IndexProcess: delimiter: "\t" dist_type: "IP" embedding_size: 512 + batch_size: 32 ``` - **index_method**:使用的检索算法。目前支持三种,HNSW32、IVF、Flat @@ -98,23 +120,29 @@ IndexProcess: - **delimiter**:**data_file** 中每一行的间隔符 - **dist_type**: 特征匹配过程中使用的相似度计算方式。例如 `IP` 内积相似度计算方式,`L2` 欧式距离计算方法 - **embedding_size**:特征维度 +- **batch_size**:建立特征库时,特征提取的`batch_size` - + + +### 4.2 检索配置文件参数 -### 3.2 检索配置文件参数 将检索的过程融合到 `PP-ShiTu` 的整体流程中,请参考 [README](../../../README_ch.md) 中 `PP-ShiTu 图像识别系统介绍` 部分。检索具体使用操作请参考[识别快速开始文档](../quick_start/quick_start_recognition.md)。 其中,检索部分配置如下,整体检索配置文件,请参考 `deploy/configs/inference_*.yaml` 文件。 +注意:此部分参数只是列举了离线检索相关部分参数。 + ```yaml IndexProcess: index_dir: "./recognition_demo_data_v1.1/gallery_logo/index/" return_k: 5 score_thres: 0.5 + hamming_radius: 100 ``` 与建库配置文件不同,新参数主要如下: - `return_k`: 检索结果返回 `k` 个结果 - `score_thres`: 检索匹配的阈值 +- `hamming_radius`: 汉明距离半径。此参数只有在使用二值特征模型,`dist_type`设置为`hamming`时才能生效。具体二值特征模型使用方法请参考[哈希编码](./deep_hashing.md) diff --git a/docs/zh_CN/inference_deployment/classification_serving_deploy.md b/docs/zh_CN/inference_deployment/classification_serving_deploy.md new file mode 100644 index 0000000000000000000000000000000000000000..3d9c999625535b9a70c2ba443512717bcb3a975c --- /dev/null +++ b/docs/zh_CN/inference_deployment/classification_serving_deploy.md @@ -0,0 +1,240 @@ +简体中文 | [English](../../en/inference_deployment/classification_serving_deploy_en.md) + +# 分类模型服务化部署 + +## 目录 + +- [1. 简介](#1-简介) +- [2. Serving 安装](#2-serving-安装) +- [3. 图像分类服务部署](#3-图像分类服务部署) +- [3.1 模型转换](#31-模型转换) +- [3.2 服务部署和请求](#32-服务部署和请求) + - [3.2.1 Python Serving](#321-python-serving) + - [3.2.2 C++ Serving](#322-c-serving) +- [4.FAQ](#4faq) + + +## 1. 
简介 + +[Paddle Serving](https://github.com/PaddlePaddle/Serving) 旨在帮助深度学习开发者轻松部署在线预测服务,支持一键部署工业级的服务能力、客户端和服务端之间高并发和高效通信、并支持多种编程语言开发客户端。 + +该部分以 HTTP 预测服务部署为例,介绍怎样在 PaddleClas 中使用 PaddleServing 部署模型服务。目前只支持 Linux 平台部署,暂不支持 Windows 平台。 + + +## 2. Serving 安装 + +Serving 官网推荐使用 docker 安装并部署 Serving 环境。首先需要拉取 docker 环境并创建基于 Serving 的 docker。 + +```shell +# 启动GPU docker +docker pull paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel +nvidia-docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel bash +nvidia-docker exec -it test bash + +# 启动CPU docker +docker pull paddlepaddle/serving:0.7.0-devel +docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-devel bash +docker exec -it test bash +``` + +进入 docker 后,需要安装 Serving 相关的 python 包。 +```shell +python3.7 -m pip install paddle-serving-client==0.7.0 +python3.7 -m pip install paddle-serving-app==0.7.0 +python3.7 -m pip install faiss-cpu==1.7.1post2 + +#若为CPU部署环境: +python3.7 -m pip install paddle-serving-server==0.7.0 # CPU +python3.7 -m pip install paddlepaddle==2.2.0 # CPU + +#若为GPU部署环境 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post102 # GPU with CUDA10.2 + TensorRT6 +python3.7 -m pip install paddlepaddle-gpu==2.2.0 # GPU with CUDA10.2 + +#其他GPU环境需要确认环境再选择执行哪一条 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post101 # GPU with CUDA10.1 + TensorRT6 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post112 # GPU with CUDA11.2 + TensorRT8 +``` + +* 如果安装速度太慢,可以通过 `-i https://pypi.tuna.tsinghua.edu.cn/simple` 更换源,加速安装过程。 +* 其他环境配置安装请参考:[使用Docker安装Paddle Serving](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_CN.md) + + + +## 3. 图像分类服务部署 + +下面以经典的 ResNet50_vd 模型为例,介绍如何部署图像分类服务。 + + +### 3.1 模型转换 + +使用 PaddleServing 做服务化部署时,需要将保存的 inference 模型转换为 Serving 模型。 +- 进入工作目录: + ```shell + cd deploy/paddleserving + ``` +- 下载并解压 ResNet50_vd 的 inference 模型: + ```shell + # 下载 ResNet50_vd inference 模型 + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar + # 解压 ResNet50_vd inference 模型 + tar xf ResNet50_vd_infer.tar + ``` +- 用 paddle_serving_client 命令把下载的 inference 模型转换成易于 Server 部署的模型格式: + ```shell + # 转换 ResNet50_vd 模型 + python3.7 -m paddle_serving_client.convert \ + --dirname ./ResNet50_vd_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./ResNet50_vd_serving/ \ + --serving_client ./ResNet50_vd_client/ + ``` + 上述命令中参数具体含义如下表所示 + | 参数 | 类型 | 默认值 | 描述 | + | ----------------- | ---- | ------------------ | ------------------------------------------------------------ | + | `dirname` | str | - | 需要转换的模型文件存储路径,Program结构文件和参数文件均保存在此目录。 | + | `model_filename` | str | None | 存储需要转换的模型Inference Program结构的文件名称。如果设置为None,则使用 `__model__` 作为默认的文件名 | + | `params_filename` | str | None | 存储需要转换的模型所有参数的文件名称。当且仅当所有模型参数被保>存在一个单独的二进制文件中,它才需要被指定。如果模型参数是存储在各自分离的文件中,设置它的值为None | + | `serving_server` | str | `"serving_server"` | 转换后的模型文件和配置文件的存储路径。默认值为serving_server | + | `serving_client` | str | `"serving_client"` | 转换后的客户端配置文件存储路径。默认值为serving_client | + + ResNet50_vd 推理模型转换完成后,会在当前文件夹多出 `ResNet50_vd_serving` 和 `ResNet50_vd_client` 的文件夹,具备如下结构: + ```shell + ├── ResNet50_vd_serving/ + │ ├── inference.pdiparams + │ ├── inference.pdmodel + │ ├── serving_server_conf.prototxt + │ └── serving_server_conf.stream.prototxt + │ + └── ResNet50_vd_client/ + ├── serving_client_conf.prototxt + └── serving_client_conf.stream.prototxt + ``` + +- Serving 
为了兼容不同模型的部署,提供了输入输出重命名的功能。让不同的模型在推理部署时,只需要修改配置文件的 `alias_name` 即可,无需修改代码即可完成推理部署。因此在转换完毕后需要分别修改 `ResNet50_vd_serving` 下的文件 `serving_server_conf.prototxt` 和 `ResNet50_vd_client` 下的文件 `serving_client_conf.prototxt`,将 `fetch_var` 中 `alias_name:` 后的字段改为 `prediction`,修改后的 `serving_server_conf.prototxt` 和 `serving_client_conf.prototxt` 如下所示: + ```log + feed_var { + name: "inputs" + alias_name: "inputs" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 + } + fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "prediction" + is_lod_tensor: false + fetch_type: 1 + shape: 1000 + } + ``` + +### 3.2 服务部署和请求 + +paddleserving 目录包含了启动 pipeline 服务、C++ serving服务和发送预测请求的代码,主要包括: +```shell +__init__.py +classification_web_service.py # 启动pipeline服务端的脚本 +config.yml # 启动pipeline服务的配置文件 +pipeline_http_client.py # http方式发送pipeline预测请求的脚本 +pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 +readme.md # 分类模型服务化部署文档 +run_cpp_serving.sh # 启动C++ Serving部署的脚本 +test_cpp_serving_client.py # rpc方式发送C++ serving预测请求的脚本 +``` + +#### 3.2.1 Python Serving + +- 启动服务: + ```shell + # 启动服务,运行日志保存在 log.txt + python3.7 classification_web_service.py &>log.txt & + ``` + +- 发送请求: + ```shell + # 发送服务请求 + python3.7 pipeline_http_client.py + ``` + 成功运行后,模型预测的结果会打印在客户端中,如下所示: + ```log + {'err_no': 0, 'err_msg': '', 'key': ['label', 'prob'], 'value': ["['daisy']", '[0.9341402053833008]'], 'tensors': []} + ``` +- 关闭服务 +如果服务程序在前台运行,可以按下`Ctrl+C`来终止服务端程序;如果在后台运行,可以使用kill命令关闭相关进程,也可以在启动服务程序的路径下执行以下命令来终止服务端程序: + ```bash + python3.7 -m paddle_serving_server.serve stop + ``` + 执行完毕后出现`Process stopped`信息表示成功关闭服务。 + + +#### 3.2.2 C++ Serving + +与Python Serving不同,C++ Serving客户端调用 C++ OP来预测,因此在启动服务之前,需要编译并安装 serving server包,并设置 `SERVING_BIN`。 + +- 编译并安装Serving server包 + ```shell + # 进入工作目录 + cd PaddleClas/deploy/paddleserving + # 一键编译安装Serving server、设置 SERVING_BIN + source ./build_server.sh python3.7 + ``` + **注:**[build_server.sh](./build_server.sh#L55-L62)所设定的路径可能需要根据实际机器上的环境如CUDA、python版本等作一定修改,然后再编译。 + +- 修改客户端文件 `ResNet50_vd_client/serving_client_conf.prototxt` ,将 `feed_type:` 后的字段改为20,将第一个 `shape:` 后的字段改为1并删掉其余的 `shape` 字段。 + ```log + feed_var { + name: "inputs" + alias_name: "inputs" + is_lod_tensor: false + feed_type: 20 + shape: 1 + } + ``` +- 修改 [`test_cpp_serving_client`](./test_cpp_serving_client.py) 的部分代码 + 1. 修改 [`load_client_config`](./test_cpp_serving_client.py#L28) 处的代码,将 `load_client_config` 后的路径改为 `ResNet50_vd_client/serving_client_conf.prototxt` 。 + 2. 
修改 [`feed={"inputs": image}`](./test_cpp_serving_client.py#L45) 处的代码,将 `inputs` 改为与 `ResNet50_vd_client/serving_client_conf.prototxt` 中 `feed_var` 字段下面的 `name` 一致。由于部分模型client文件中的 `name` 为 `x` 而不是 `inputs` ,因此使用这些模型进行C++ Serving部署时需要注意这一点。 + +- 启动服务: + ```shell + # 启动服务, 服务在后台运行,运行日志保存在 nohup.txt + # CPU部署 + bash run_cpp_serving.sh + # GPU部署并指定0号卡 + bash run_cpp_serving.sh 0 + ``` + +- 发送请求: + ```shell + # 发送服务请求 + python3.7 test_cpp_serving_client.py + ``` + 成功运行后,模型预测的结果会打印在客户端中,如下所示: + ```log + prediction: daisy, probability: 0.9341399073600769 + ``` +- 关闭服务: + 如果服务程序在前台运行,可以按下`Ctrl+C`来终止服务端程序;如果在后台运行,可以使用kill命令关闭相关进程,也可以在启动服务程序的路径下执行以下命令来终止服务端程序: + ```bash + python3.7 -m paddle_serving_server.serve stop + ``` + 执行完毕后出现`Process stopped`信息表示成功关闭服务。 + +## 4.FAQ + +**Q1**: 发送请求后没有结果返回或者提示输出解码报错 + +**A1**: 启动服务和发送请求时不要设置代理,可以在启动服务前和发送请求前关闭代理,关闭代理的命令是: +```shell +unset https_proxy +unset http_proxy +``` + +**Q2**: 启动服务后没有任何反应 + +**A2**: 可以检查`config.yml`中`model_config`对应的路径是否存在,文件夹命名是否正确 + +更多的服务部署类型,如 `RPC 预测服务` 等,可以参考 Serving 的[github 官网](https://github.com/PaddlePaddle/Serving/tree/v0.9.0/examples) diff --git a/docs/zh_CN/inference_deployment/cpp_deploy_on_windows.md b/docs/zh_CN/inference_deployment/cpp_deploy_on_windows.md old mode 100755 new mode 100644 index b7089cbdb072b7365d44e13e9a0abd3d6f056483..03bf54348d0b48b64843eb89699dce4ffe64ce8a --- a/docs/zh_CN/inference_deployment/cpp_deploy_on_windows.md +++ b/docs/zh_CN/inference_deployment/cpp_deploy_on_windows.md @@ -5,13 +5,13 @@ PaddleClas 在 Windows 平台下基于 `Visual Studio 2019 Community` 进行了 ----- ## 目录 * [1. 前置条件](#1) - * [1.1 下载 PaddlePaddle C++ 预测库 paddle_inference_install_dir](#1.1) - * [1.2 安装配置 OpenCV](#1.2) + * [1.1 下载 PaddlePaddle C++ 预测库 paddle_inference_install_dir](#1.1) + * [1.2 安装配置 OpenCV](#1.2) * [2. 使用 Visual Studio 2019 编译](#2) * [3. 预测](#3) - * [3.1 准备 inference model](#3.1) - * [3.2 运行预测](#3.2) - * [3.3 注意事项](#3.3) + * [3.1 准备 inference model](#3.1) + * [3.2 运行预测](#3.2) + * [3.3 注意事项](#3.3) ## 1. 前置条件 diff --git a/docs/zh_CN/inference_deployment/export_model.md b/docs/zh_CN/inference_deployment/export_model.md index 1d8decb2837c0f68f71a6b022b05e574ce3ef83b..4e2d98e9310602b4df7c0bedee32be88b7cf8fef 100644 --- a/docs/zh_CN/inference_deployment/export_model.md +++ b/docs/zh_CN/inference_deployment/export_model.md @@ -17,7 +17,7 @@ PaddlePaddle 支持导出 inference 模型用于部署推理场景,相比于 ## 1. 环境准备 -首先请参考文档[安装 PaddlePaddle](../installation/install_paddle.md)和文档[安装 PaddleClas](../installation/install_paddleclas.md)配置运行环境。 +首先请参考文档文档[环境准备](../installation/install_paddleclas.md)配置运行环境。 ## 2. 
分类模型导出 @@ -91,9 +91,9 @@ python3 tools/export_model.py \ 导出的 inference 模型文件可用于预测引擎进行推理部署,根据不同的部署方式/平台,可参考: -* [Python 预测](./python_deploy.md) -* [C++ 预测](./cpp_deploy.md)(目前仅支持分类模型) -* [Python Whl 预测](./whl_deploy.md)(目前仅支持分类模型) -* [PaddleHub Serving 部署](./paddle_hub_serving_deploy.md)(目前仅支持分类模型) -* [PaddleServing 部署](./paddle_serving_deploy.md) -* [PaddleLite 部署](./paddle_lite_deploy.md)(目前仅支持分类模型) +* [Python 预测](./inference/python_deploy.md) +* [C++ 预测](./inference/cpp_deploy.md)(目前仅支持分类模型) +* [Python Whl 预测](./inference/whl_deploy.md)(目前仅支持分类模型) +* [PaddleHub Serving 部署](./deployment/paddle_hub_serving_deploy.md)(目前仅支持分类模型) +* [PaddleServing 部署](./deployment/paddle_serving_deploy.md) +* [PaddleLite 部署](./deployment/paddle_lite_deploy.md)(目前仅支持分类模型) diff --git a/docs/zh_CN/inference_deployment/paddle_hub_serving_deploy.md b/docs/zh_CN/inference_deployment/paddle_hub_serving_deploy.md index e3892e9a96810c418ec508a555a9d276b3ba73ae..37d688b32051af8fe5a44dcd245c5340e5baafe2 100644 --- a/docs/zh_CN/inference_deployment/paddle_hub_serving_deploy.md +++ b/docs/zh_CN/inference_deployment/paddle_hub_serving_deploy.md @@ -1,9 +1,9 @@ +简体中文 | [English](../../en/inference_deployment/paddle_hub_serving_deploy_en.md) + # 基于 PaddleHub Serving 的服务部署 PaddleClas 支持通过 PaddleHub 快速进行服务化部署。目前支持图像分类的部署,图像识别的部署敬请期待。 ---- - ## 目录 - [1. 简介](#1) @@ -22,20 +22,20 @@ PaddleClas 支持通过 PaddleHub 快速进行服务化部署。目前支持图 hubserving 服务部署配置服务包 `clas` 下包含 3 个必选文件,目录如下: -``` -hubserving/clas/ - └─ __init__.py 空文件,必选 - └─ config.json 配置文件,可选,使用配置启动服务时作为参数传入 - └─ module.py 主模块,必选,包含服务的完整逻辑 - └─ params.py 参数文件,必选,包含模型路径、前后处理参数等参数 +```shell +deploy/hubserving/clas/ +├── __init__.py # 空文件,必选 +├── config.json # 配置文件,可选,使用配置启动服务时作为参数传入 +├── module.py # 主模块,必选,包含服务的完整逻辑 +└── params.py # 参数文件,必选,包含模型路径、前后处理参数等参数 ``` ## 2. 准备环境 ```shell -# 安装 paddlehub,请安装 2.0 版本 -pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple +# 安装 paddlehub,建议安装 2.1.0 版本 +python3.7 -m pip install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple ``` @@ -53,30 +53,27 @@ pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/sim ```python "inference_model_dir": "../inference/" ``` -需要注意, - * 模型文件(包括 `.pdmodel` 与 `.pdiparams`)名称必须为 `inference`。 - * 我们也提供了大量基于 ImageNet-1k 数据集的预训练模型,模型列表及下载地址详见[模型库概览](../algorithm_introduction/ImageNet_models.md),也可以使用自己训练转换好的模型。 +* 模型文件(包括 `.pdmodel` 与 `.pdiparams`)的名称必须为 `inference`。 +* 我们提供了大量基于 ImageNet-1k 数据集的预训练模型,模型列表及下载地址详见[模型库概览](../algorithm_introduction/ImageNet_models.md),也可以使用自己训练转换好的模型。 ## 4. 安装服务模块 -针对 Linux 环境和 Windows 环境,安装命令如下。 - * 在 Linux 环境下,安装示例如下: -```shell -cd PaddleClas/deploy -# 安装服务模块: -hub install hubserving/clas/ -``` + ```shell + cd PaddleClas/deploy + # 安装服务模块: + hub install hubserving/clas/ + ``` * 在 Windows 环境下(文件夹的分隔符为`\`),安装示例如下: -```shell -cd PaddleClas\deploy -# 安装服务模块: -hub install hubserving\clas\ -``` + ```shell + cd PaddleClas\deploy + # 安装服务模块: + hub install hubserving\clas\ + ``` @@ -84,36 +81,34 @@ hub install hubserving\clas\ -### 5.1 命令行命令启动 +### 5.1 命令行启动 该方式仅支持使用 CPU 预测。启动命令: ```shell -$ hub serving start --modules Module1==Version1 \ - --port XXXX \ - --use_multiprocess \ - --workers \ -``` +hub serving start \ +--modules clas_system +--port 8866 +``` +这样就完成了一个服务化 API 的部署,使用默认端口号 8866。 **参数说明**: -|参数|用途| -|-|-| -|--modules/-m| [**必选**] PaddleHub Serving 预安装模型,以多个 Module==Version 键值对的形式列出
*`当不指定 Version 时,默认选择最新版本`*| -|--port/-p| [**可选**] 服务端口,默认为 8866| +|参数|用途| +|-|-| +|--modules/-m| [**必选**] PaddleHub Serving 预安装模型,以多个 Module==Version 键值对的形式列出
*`当不指定 Version 时,默认选择最新版本`*| +|--port/-p| [**可选**] 服务端口,默认为 8866| |--use_multiprocess| [**可选**] 是否启用并发方式,默认为单进程方式,推荐多核 CPU 机器使用此方式
*`Windows 操作系统只支持单进程方式`*| -|--workers| [**可选**] 在并发方式下指定的并发任务数,默认为 `2*cpu_count-1`,其中 `cpu_count` 为 CPU 核数| - -如按默认参数启动服务:```hub serving start -m clas_system``` - -这样就完成了一个服务化 API 的部署,使用默认端口号 8866。 - +|--workers| [**可选**] 在并发方式下指定的并发任务数,默认为 `2*cpu_count-1`,其中 `cpu_count` 为 CPU 核数| +更多部署细节详见 [PaddleHub Serving模型一键服务部署](https://paddlehub.readthedocs.io/zh_CN/release-v2.1/tutorial/serving.html) ### 5.2 配置文件启动 该方式仅支持使用 CPU 或 GPU 预测。启动命令: -```hub serving start -c config.json``` +```shell +hub serving start -c config.json +``` 其中,`config.json` 格式如下: @@ -163,12 +158,21 @@ hub serving start -c hubserving/clas/config.json ```shell cd PaddleClas/deploy -python hubserving/test_hubserving.py server_url image_path -``` +python3.7 hubserving/test_hubserving.py \ +--server_url http://127.0.0.1:8866/predict/clas_system \ +--image_file ./hubserving/ILSVRC2012_val_00006666.JPEG \ +--batch_size 8 +``` +**预测输出** +```log +The result(s): class_ids: [57, 67, 68, 58, 65], label_names: ['garter snake, grass snake', 'diamondback, diamondback rattlesnake, Crotalus adamanteus', 'sidewinder, horned rattlesnake, Crotalus cerastes', 'water snake', 'sea snake'], scores: [0.21915, 0.15631, 0.14794, 0.13177, 0.12285] +The average time of prediction cost: 2.970 s/image +The average time cost: 3.014 s/image +The average top-1 score: 0.110 +``` **脚本参数说明**: -* **server_url**:服务地址,格式为 -`http://[ip_address]:[port]/predict/[module_name]` +* **server_url**:服务地址,格式为`http://[ip_address]:[port]/predict/[module_name]`。 * **image_path**:测试图像路径,可以是单张图片路径,也可以是图像集合目录路径。 * **batch_size**:[**可选**] 以 `batch_size` 大小为单位进行预测,默认为 `1`。 * **resize_short**:[**可选**] 预处理时,按短边调整大小,默认为 `256`。 @@ -178,41 +182,44 @@ python hubserving/test_hubserving.py server_url image_path **注意**:如果使用 `Transformer` 系列模型,如 `DeiT_***_384`, `ViT_***_384` 等,请注意模型的输入数据尺寸,需要指定`--resize_short=384 --crop_size=384`。 -访问示例: - -```shell -python hubserving/test_hubserving.py --server_url http://127.0.0.1:8866/predict/clas_system --image_file ./hubserving/ILSVRC2012_val_00006666.JPEG --batch_size 8 -``` - **返回结果格式说明**: 返回结果为列表(list),包含 top-k 个分类结果,以及对应的得分,还有此图片预测耗时,具体如下: -``` +```shell list: 返回结果 -└─ list: 第一张图片结果 - └─ list: 前 k 个分类结果,依 score 递减排序 - └─ list: 前 k 个分类结果对应的 score,依 score 递减排序 - └─ float: 该图分类耗时,单位秒 +└──list: 第一张图片结果 + ├── list: 前 k 个分类结果,依 score 递减排序 + ├── list: 前 k 个分类结果对应的 score,依 score 递减排序 + └── float: 该图分类耗时,单位秒 ``` + ## 7. 自定义修改服务模块 -如果需要修改服务逻辑,需要进行以下操作: +如果需要修改服务逻辑,需要进行以下操作: -1. 停止服务 -```hub serving stop --port/-p XXXX``` +1. 停止服务 + ```shell + hub serving stop --port/-p XXXX + ``` -2. 到相应的 `module.py` 和 `params.py` 等文件中根据实际需求修改代码。`module.py` 修改后需要重新安装(`hub install hubserving/clas/`)并部署。在进行部署前,可通过 `python hubserving/clas/module.py` 测试已安装服务模块。 +2. 到相应的 `module.py` 和 `params.py` 等文件中根据实际需求修改代码。`module.py` 修改后需要重新安装(`hub install hubserving/clas/`)并部署。在进行部署前,可先通过 `python3.7 hubserving/clas/module.py` 命令来快速测试准备部署的代码。 -3. 卸载旧服务包 -```hub uninstall clas_system``` +3. 卸载旧服务包 + ```shell + hub uninstall clas_system + ``` -4. 安装修改后的新服务包 -```hub install hubserving/clas/``` +4. 安装修改后的新服务包 + ```shell + hub install hubserving/clas/ + ``` -5.重新启动服务 -```hub serving start -m clas_system``` +5. 
重新启动服务 + ```shell + hub serving start -m clas_system + ``` **注意**: 常用参数可在 `PaddleClas/deploy/hubserving/clas/params.py` 中修改: @@ -229,4 +236,4 @@ list: 返回结果 'class_id_map_file': ``` -为了避免不必要的延时以及能够以 batch_size 进行预测,数据预处理逻辑(包括 `resize`、`crop` 等操作)均在客户端完成,因此需要在 `PaddleClas/deploy/hubserving/test_hubserving.py#L35-L52` 中修改。 +为了避免不必要的延时以及能够以 batch_size 进行预测,数据预处理逻辑(包括 `resize`、`crop` 等操作)均在客户端完成,因此需要在 [PaddleClas/deploy/hubserving/test_hubserving.py#L41-L47](../../../deploy/hubserving/test_hubserving.py#L41-L47) 以及 [PaddleClas/deploy/hubserving/test_hubserving.py#L51-L76](../../../deploy/hubserving/test_hubserving.py#L51-L76) 中修改数据预处理逻辑相关代码。 diff --git a/docs/zh_CN/inference_deployment/paddle_lite_deploy.md b/docs/zh_CN/inference_deployment/paddle_lite_deploy.md index 68480f769a67aae33ca614b0eede2581fcf57392..bdfa89a2d8af904d5d0532053d09a2257ca83333 100644 --- a/docs/zh_CN/inference_deployment/paddle_lite_deploy.md +++ b/docs/zh_CN/inference_deployment/paddle_lite_deploy.md @@ -231,9 +231,9 @@ adb push imgs/tabby_cat.jpg /data/local/tmp/arm_cpu/ ```shell clas_model_file ./MobileNetV3_large_x1_0.nb # 模型文件地址 -label_path ./imagenet1k_label_list.txt # 类别映射文本文件 +label_path ./imagenet1k_label_list.txt # 类别映射文本文件 resize_short_size 256 # resize之后的短边边长 -crop_size 224 # 裁剪后用于预测的边长 +crop_size 224 # 裁剪后用于预测的边长 visualize 0 # 是否进行可视化,如果选择的话,会在当前文件夹下生成名为clas_result.png的图像文件 num_threads 1 # 线程数,默认是1。 precision FP32 # 精度类型,可以选择 FP32 或者 INT8,默认是 FP32。 @@ -263,4 +263,3 @@ A1:如果已经走通了上述步骤,更换模型只需要替换 `.nb` 模 Q2:换一个图测试怎么做? A2:替换 debug 下的测试图像为你想要测试的图像,使用 ADB 再次 push 到手机上即可。 - diff --git a/docs/zh_CN/inference_deployment/paddle_serving_deploy.md b/docs/zh_CN/inference_deployment/paddle_serving_deploy.md deleted file mode 100644 index 18faeb3655e78b04394aa5f64caca91f5f1ae630..0000000000000000000000000000000000000000 --- a/docs/zh_CN/inference_deployment/paddle_serving_deploy.md +++ /dev/null @@ -1,294 +0,0 @@ -# 模型服务化部署 --------- -## 目录 -- [1. 简介](#1) -- [2. Serving 安装](#2) -- [3. 图像分类服务部署](#3) - - [3.1 模型转换](#3.1) - - [3.2 服务部署和请求](#3.2) - - [3.2.1 Python Serving](#3.2.1) - - [3.2.2 C++ Serving](#3.2.2) -- [4. 图像识别服务部署](#4) - - [4.1 模型转换](#4.1) - - [4.2 服务部署和请求](#4.2) - - [4.2.1 Python Serving](#4.2.1) - - [4.2.2 C++ Serving](#4.2.2) -- [5. FAQ](#5) - - -## 1. 简介 -[Paddle Serving](https://github.com/PaddlePaddle/Serving) 旨在帮助深度学习开发者轻松部署在线预测服务,支持一键部署工业级的服务能力、客户端和服务端之间高并发和高效通信、并支持多种编程语言开发客户端。 - -该部分以 HTTP 预测服务部署为例,介绍怎样在 PaddleClas 中使用 PaddleServing 部署模型服务。目前只支持 Linux 平台部署,暂不支持 Windows 平台。 - - -## 2. 
Serving 安装 -Serving 官网推荐使用 docker 安装并部署 Serving 环境。首先需要拉取 docker 环境并创建基于 Serving 的 docker。 - -```shell -# 启动GPU docker -docker pull paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel -nvidia-docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel bash -nvidia-docker exec -it test bash - -# 启动CPU docker -docker pull paddlepaddle/serving:0.7.0-devel -docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-devel bash -docker exec -it test bash -``` - -进入 docker 后,需要安装 Serving 相关的 python 包。 -```shell -pip3 install paddle-serving-client==0.7.0 -pip3 install paddle-serving-app==0.7.0 -pip3 install faiss-cpu==1.7.1post2 - -#若为CPU部署环境: -pip3 install paddle-serving-server==0.7.0 # CPU -pip3 install paddlepaddle==2.2.0 # CPU - -#若为GPU部署环境 -pip3 install paddle-serving-server-gpu==0.7.0.post102 # GPU with CUDA10.2 + TensorRT6 -pip3 install paddlepaddle-gpu==2.2.0 # GPU with CUDA10.2 - -#其他GPU环境需要确认环境再选择执行哪一条 -pip3 install paddle-serving-server-gpu==0.7.0.post101 # GPU with CUDA10.1 + TensorRT6 -pip3 install paddle-serving-server-gpu==0.7.0.post112 # GPU with CUDA11.2 + TensorRT8 -``` - -* 如果安装速度太慢,可以通过 `-i https://pypi.tuna.tsinghua.edu.cn/simple` 更换源,加速安装过程。 -* 其他环境配置安装请参考: [使用Docker安装Paddle Serving](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_CN.md) - - - -## 3. 图像分类服务部署 - -### 3.1 模型转换 -使用 PaddleServing 做服务化部署时,需要将保存的 inference 模型转换为 Serving 模型。下面以经典的 ResNet50_vd 模型为例,介绍如何部署图像分类服务。 -- 进入工作目录: -```shell -cd deploy/paddleserving -``` -- 下载 ResNet50_vd 的 inference 模型: -```shell -# 下载并解压 ResNet50_vd 模型 -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar -``` -- 用 paddle_serving_client 把下载的 inference 模型转换成易于 Server 部署的模型格式: -``` -# 转换 ResNet50_vd 模型 -python3 -m paddle_serving_client.convert --dirname ./ResNet50_vd_infer/ \ - --model_filename inference.pdmodel \ - --params_filename inference.pdiparams \ - --serving_server ./ResNet50_vd_serving/ \ - --serving_client ./ResNet50_vd_client/ -``` -ResNet50_vd 推理模型转换完成后,会在当前文件夹多出 `ResNet50_vd_serving` 和 `ResNet50_vd_client` 的文件夹,具备如下格式: -``` -|- ResNet50_vd_serving/ - |- inference.pdiparams - |- inference.pdmodel - |- serving_server_conf.prototxt - |- serving_server_conf.stream.prototxt -|- ResNet50_vd_client - |- serving_client_conf.prototxt - |- serving_client_conf.stream.prototxt -``` -得到模型文件之后,需要分别修改 `ResNet50_vd_serving` 和 `ResNet50_vd_client` 下文件 `serving_server_conf.prototxt` 中的 alias 名字:将 `fetch_var` 中的 `alias_name` 改为 `prediction` - -**备注**: Serving 为了兼容不同模型的部署,提供了输入输出重命名的功能。这样,不同的模型在推理部署时,只需要修改配置文件的 alias_name 即可,无需修改代码即可完成推理部署。 -修改后的 serving_server_conf.prototxt 如下所示: -``` -feed_var { - name: "inputs" - alias_name: "inputs" - is_lod_tensor: false - feed_type: 1 - shape: 3 - shape: 224 - shape: 224 -} -fetch_var { - name: "save_infer_model/scale_0.tmp_1" - alias_name: "prediction" - is_lod_tensor: false - fetch_type: 1 - shape: 1000 -} -``` - -### 3.2 服务部署和请求 -paddleserving 目录包含了启动 pipeline 服务、C++ serving服务和发送预测请求的代码,包括: -```shell -__init__.py -config.yml # 启动pipeline服务的配置文件 -pipeline_http_client.py # http方式发送pipeline预测请求的脚本 -pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 -classification_web_service.py # 启动pipeline服务端的脚本 -run_cpp_serving.sh # 启动C++ Serving部署的脚本 -test_cpp_serving_client.py # rpc方式发送C++ serving预测请求的脚本 -``` - -#### 3.2.1 Python Serving -- 启动服务: -```shell -# 启动服务,运行日志保存在 log.txt -python3 classification_web_service.py &>log.txt & -``` - -- 发送请求: -```shell -# 发送服务请求 -python3 pipeline_http_client.py 
-``` -成功运行后,模型预测的结果会打印在 cmd 窗口中,结果如下: -``` -{'err_no': 0, 'err_msg': '', 'key': ['label', 'prob'], 'value': ["['daisy']", '[0.9341402053833008]'], 'tensors': []} -``` - - -#### 3.2.2 C++ Serving -- 启动服务: -```shell -# 启动服务, 服务在后台运行,运行日志保存在 nohup.txt -sh run_cpp_serving.sh -``` - -- 发送请求: -```shell -# 发送服务请求 -python3 test_cpp_serving_client.py -``` -成功运行后,模型预测的结果会打印在 cmd 窗口中,结果如下: -``` -prediction: daisy, probability: 0.9341399073600769 -``` - - -## 4.图像识别服务部署 -使用 PaddleServing 做服务化部署时,需要将保存的 inference 模型转换为 Serving 模型。 下面以 PP-ShiTu 中的超轻量图像识别模型为例,介绍图像识别服务的部署。 - -## 4.1 模型转换 -- 下载通用检测 inference 模型和通用识别 inference 模型 -``` -cd deploy -# 下载并解压通用识别模型 -wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar -cd models -tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar -# 下载并解压通用检测模型 -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar -tar -xf picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar -``` -- 转换识别 inference 模型为 Serving 模型: -``` -# 转换识别模型 -python3 -m paddle_serving_client.convert --dirname ./general_PPLCNet_x2_5_lite_v1.0_infer/ \ - --model_filename inference.pdmodel \ - --params_filename inference.pdiparams \ - --serving_server ./general_PPLCNet_x2_5_lite_v1.0_serving/ \ - --serving_client ./general_PPLCNet_x2_5_lite_v1.0_client/ -``` -识别推理模型转换完成后,会在当前文件夹多出 `general_PPLCNet_x2_5_lite_v1.0_serving/` 和 `general_PPLCNet_x2_5_lite_v1.0_client/` 的文件夹。分别修改 `general_PPLCNet_x2_5_lite_v1.0_serving/` 和 `general_PPLCNet_x2_5_lite_v1.0_client/` 目录下的 serving_server_conf.prototxt 中的 alias 名字: 将 `fetch_var` 中的 `alias_name` 改为 `features`。 -修改后的 serving_server_conf.prototxt 内容如下: -``` -feed_var { - name: "x" - alias_name: "x" - is_lod_tensor: false - feed_type: 1 - shape: 3 - shape: 224 - shape: 224 -} -fetch_var { - name: "save_infer_model/scale_0.tmp_1" - alias_name: "features" - is_lod_tensor: false - fetch_type: 1 - shape: 512 -} -``` -- 转换通用检测 inference 模型为 Serving 模型: -``` -# 转换通用检测模型 -python3 -m paddle_serving_client.convert --dirname ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \ - --model_filename inference.pdmodel \ - --params_filename inference.pdiparams \ - --serving_server ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ \ - --serving_client ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ -``` -检测 inference 模型转换完成后,会在当前文件夹多出 `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` 和 `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/` 的文件夹。 - -**注意:** 此处不需要修改 `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` 目录下的 serving_server_conf.prototxt 中的 alias 名字。 - -- 下载并解压已经构建后的检索库 index -``` -cd ../ -wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar && tar -xf drink_dataset_v1.0.tar -``` - -## 4.2 服务部署和请求 -**注意:** 识别服务涉及到多个模型,出于性能考虑采用 PipeLine 部署方式。Pipeline 部署方式当前不支持 windows 平台。 -- 进入到工作目录 -```shell -cd ./deploy/paddleserving/recognition -``` -paddleserving 目录包含启动 Python Pipeline 服务、C++ Serving 服务和发送预测请求的代码,包括: -``` -__init__.py -config.yml # 启动python pipeline服务的配置文件 -pipeline_http_client.py # http方式发送pipeline预测请求的脚本 -pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 -recognition_web_service.py # 启动pipeline服务端的脚本 -run_cpp_serving.sh # 启动C++ Pipeline Serving部署的脚本 -test_cpp_serving_client.py # rpc方式发送C++ Pipeline serving预测请求的脚本 -``` - - -#### 4.2.1 Python Serving -- 启动服务: -``` -# 启动服务,运行日志保存在 log.txt -python3 recognition_web_service.py &>log.txt & -``` - -- 发送请求: -``` -python3 pipeline_http_client.py 
-``` -成功运行后,模型预测的结果会打印在 cmd 窗口中,结果如下: -``` -{'err_no': 0, 'err_msg': '', 'key': ['result'], 'value': ["[{'bbox': [345, 95, 524, 576], 'rec_docs': '红牛-强化型', 'rec_scores': 0.79903316}]"], 'tensors': []} -``` - - -#### 4.2.2 C++ Serving -- 启动服务: -```shell -# 启动服务: 此处会在后台同时启动主体检测和特征提取服务,端口号分别为9293和9294; -# 运行日志分别保存在 log_mainbody_detection.txt 和 log_feature_extraction.txt中 -sh run_cpp_serving.sh -``` - -- 发送请求: -```shell -# 发送服务请求 -python3 test_cpp_serving_client.py -``` -成功运行后,模型预测的结果会打印在 cmd 窗口中,结果如下所示: -``` -[{'bbox': [345, 95, 524, 586], 'rec_docs': '红牛-强化型', 'rec_scores': 0.8016462}] -``` - - -## 5.FAQ -**Q1**: 发送请求后没有结果返回或者提示输出解码报错 - -**A1**: 启动服务和发送请求时不要设置代理,可以在启动服务前和发送请求前关闭代理,关闭代理的命令是: -``` -unset https_proxy -unset http_proxy -``` - -更多的服务部署类型,如 `RPC 预测服务` 等,可以参考 Serving 的[github 官网](https://github.com/PaddlePaddle/Serving/tree/v0.7.0/examples) diff --git a/docs/zh_CN/inference_deployment/python_deploy.md b/docs/zh_CN/inference_deployment/python_deploy.md index 39843df12d17265fc586b160003e3361edb8a14a..22b871344b782098ef9ded562cc7f2ce4277f790 100644 --- a/docs/zh_CN/inference_deployment/python_deploy.md +++ b/docs/zh_CN/inference_deployment/python_deploy.md @@ -2,14 +2,15 @@ --- -首先请参考文档[安装 PaddlePaddle](../installation/install_paddle.md)和文档[安装 PaddleClas](../installation/install_paddleclas.md)配置运行环境。 +首先请参考文档[环境准备](../installation/install_paddleclas.md)配置运行环境。 ## 目录 -- [1. 图像分类推理](#1) -- [2. 主体检测模型推理](#2) -- [3. 特征提取模型推理](#3) -- [4. 主体检测、特征提取和向量检索串联](#4) +- [1. 图像分类模型推理](#1) +- [2. PP-ShiTu模型推理](#2) + - [2.1 主体检测模型推理](#2.1) + - [2.2 特征提取模型推理](#2.2) + - [2.3 PP-ShiTu PipeLine推理](#2.3) ## 1. 图像分类推理 @@ -42,7 +43,12 @@ python python/predict_cls.py -c configs/inference_cls.yaml * 如果你希望提升评测模型速度,使用 GPU 评测时,建议开启 TensorRT 加速预测,使用 CPU 评测时,建议开启 MKL-DNN 加速预测。 -## 2. 主体检测模型推理 +## 2. PP-ShiTu模型推理 + +PP-ShiTu整个Pipeline包含三部分:主体检测、特提取模型、特征检索。其中主体检测、特征模型可以单独推理使用。单独主体检测详见[2.1](#2.1),特征提取模型单独推理详见[2.2](#2.2), PP-ShiTu整体推理详见[2.3](#2.3)。 + + +### 2.1 主体检测模型推理 进入 PaddleClas 的 `deploy` 目录下: @@ -70,8 +76,8 @@ python python/predict_det.py -c configs/inference_det.yaml * `Global.use_gpu`: 是否使用 GPU 预测,默认为 `True`。 - -## 3. 特征提取模型推理 + +### 2.2 特征提取模型推理 下面以商品特征提取为例,介绍特征提取模型推理。首先进入 PaddleClas 的 `deploy` 目录下: @@ -90,7 +96,7 @@ tar -xf ./models/product_ResNet50_vd_aliproduct_v1.0_infer.tar -C ./models/ 上述预测命令可以得到一个 512 维的特征向量,直接输出在在命令行中。 - -## 4. 主体检测、特征提取和向量检索串联 + +### 2.3. PP-ShiTu PipeLine推理 主体检测、特征提取和向量检索的串联预测,可以参考图像识别[快速体验](../quick_start/quick_start_recognition.md)。 diff --git a/docs/zh_CN/inference_deployment/recognition_serving_deploy.md b/docs/zh_CN/inference_deployment/recognition_serving_deploy.md new file mode 100644 index 0000000000000000000000000000000000000000..5311fe997269aecc1f956e8ebcdbcb628b3ed23c --- /dev/null +++ b/docs/zh_CN/inference_deployment/recognition_serving_deploy.md @@ -0,0 +1,281 @@ +简体中文 | [English](../../en/inference_deployment/recognition_serving_deploy_en.md) + +# 识别模型服务化部署 + +## 目录 + +- [1. 简介](#1-简介) +- [2. Serving 安装](#2-serving-安装) +- [3. 图像识别服务部署](#3-图像识别服务部署) + - [3.1 模型转换](#31-模型转换) + - [3.2 服务部署和请求](#32-服务部署和请求) + - [3.2.1 Python Serving](#321-python-serving) + - [3.2.2 C++ Serving](#322-c-serving) +- [4. FAQ](#4-faq) + + +## 1. 简介 + +[Paddle Serving](https://github.com/PaddlePaddle/Serving) 旨在帮助深度学习开发者轻松部署在线预测服务,支持一键部署工业级的服务能力、客户端和服务端之间高并发和高效通信、并支持多种编程语言开发客户端。 + +该部分以 HTTP 预测服务部署为例,介绍怎样在 PaddleClas 中使用 PaddleServing 部署模型服务。目前只支持 Linux 平台部署,暂不支持 Windows 平台。 + + +## 2. 
Serving 安装 + +Serving 官网推荐使用 docker 安装并部署 Serving 环境。首先需要拉取 docker 环境并创建基于 Serving 的 docker。 + +```shell +# 启动GPU docker +docker pull paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel +nvidia-docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-cuda10.2-cudnn7-devel bash +nvidia-docker exec -it test bash + +# 启动CPU docker +docker pull paddlepaddle/serving:0.7.0-devel +docker run -p 9292:9292 --name test -dit paddlepaddle/serving:0.7.0-devel bash +docker exec -it test bash +``` + +进入 docker 后,需要安装 Serving 相关的 python 包。 +```shell +python3.7 -m pip install paddle-serving-client==0.7.0 +python3.7 -m pip install paddle-serving-app==0.7.0 +python3.7 -m pip install faiss-cpu==1.7.1post2 + +#若为CPU部署环境: +python3.7 -m pip install paddle-serving-server==0.7.0 # CPU +python3.7 -m pip install paddlepaddle==2.2.0 # CPU + +#若为GPU部署环境 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post102 # GPU with CUDA10.2 + TensorRT6 +python3.7 -m pip install paddlepaddle-gpu==2.2.0 # GPU with CUDA10.2 + +#其他GPU环境需要确认环境再选择执行哪一条 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post101 # GPU with CUDA10.1 + TensorRT6 +python3.7 -m pip install paddle-serving-server-gpu==0.7.0.post112 # GPU with CUDA11.2 + TensorRT8 +``` + +* 如果安装速度太慢,可以通过 `-i https://pypi.tuna.tsinghua.edu.cn/simple` 更换源,加速安装过程。 +* 其他环境配置安装请参考:[使用Docker安装Paddle Serving](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_CN.md) + + + + +## 3. 图像识别服务部署 + +使用 PaddleServing 做图像识别服务化部署时,**需要将保存的多个 inference 模型都转换为 Serving 模型**。 下面以 PP-ShiTu 中的超轻量图像识别模型为例,介绍图像识别服务的部署。 + + +### 3.1 模型转换 + +- 进入工作目录: + ```shell + cd deploy/ + ``` +- 下载通用检测 inference 模型和通用识别 inference 模型 + ```shell + # 创建并进入models文件夹 + mkdir models + cd models + # 下载并解压通用识别模型 + wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar + tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar + # 下载并解压通用检测模型 + wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar + tar -xf picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar + ``` +- 转换通用识别 inference 模型为 Serving 模型: + ```shell + # 转换通用识别模型 + python3.7 -m paddle_serving_client.convert \ + --dirname ./general_PPLCNet_x2_5_lite_v1.0_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./general_PPLCNet_x2_5_lite_v1.0_serving/ \ + --serving_client ./general_PPLCNet_x2_5_lite_v1.0_client/ + ``` + 上述命令的参数含义与[#3.1 模型转换](#3.1)相同 + 通用识别 inference 模型转换完成后,会在当前文件夹多出 `general_PPLCNet_x2_5_lite_v1.0_serving/` 和 `general_PPLCNet_x2_5_lite_v1.0_client/` 的文件夹,具备如下结构: + ```shell + ├── general_PPLCNet_x2_5_lite_v1.0_serving/ + │ ├── inference.pdiparams + │ ├── inference.pdmodel + │ ├── serving_server_conf.prototxt + │ └── serving_server_conf.stream.prototxt + │ + └── general_PPLCNet_x2_5_lite_v1.0_client/ + ├── serving_client_conf.prototxt + └── serving_client_conf.stream.prototxt + ``` +- 转换通用检测 inference 模型为 Serving 模型: + ```shell + # 转换通用检测模型 + python3.7 -m paddle_serving_client.convert --dirname ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ \ + --serving_client ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ + ``` + 上述命令的参数含义与[#3.1 模型转换](#3.1)相同 + + 识别推理模型转换完成后,会在当前文件夹多出 `general_PPLCNet_x2_5_lite_v1.0_serving/` 和 `general_PPLCNet_x2_5_lite_v1.0_client/` 的文件夹。分别修改 
`general_PPLCNet_x2_5_lite_v1.0_serving/` 和 `general_PPLCNet_x2_5_lite_v1.0_client/` 目录下的 `serving_server_conf.prototxt` 中的 `alias` 名字: 将 `fetch_var` 中的 `alias_name` 改为 `features`。 修改后的 `serving_server_conf.prototxt` 内容如下 + + ```log + feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 + } + fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: false + fetch_type: 1 + shape: 512 + } + ``` + 通用检测 inference 模型转换完成后,会在当前文件夹多出 `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` 和 `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/` 的文件夹,具备如下结构: + ```shell + ├── picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ + │ ├── inference.pdiparams + │ ├── inference.pdmodel + │ ├── serving_server_conf.prototxt + │ └── serving_server_conf.stream.prototxt + │ + └── picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ + ├── serving_client_conf.prototxt + └── serving_client_conf.stream.prototxt + ``` + 上述命令中参数具体含义如下表所示 + | 参数 | 类型 | 默认值 | 描述 | + | ----------------- | ---- | ------------------ | ------------------------------------------------------------ | + | `dirname` | str | - | 需要转换的模型文件存储路径,Program结构文件和参数文件均保存在此目录。 | + | `model_filename` | str | None | 存储需要转换的模型Inference Program结构的文件名称。如果设置为None,则使用 `__model__` 作为默认的文件名 | + | `params_filename` | str | None | 存储需要转换的模型所有参数的文件名称。当且仅当所有模型参数被保>存在一个单独的二进制文件中,它才需要被指定。如果模型参数是存储在各自分离的文件中,设置它的值为None | + | `serving_server` | str | `"serving_server"` | 转换后的模型文件和配置文件的存储路径。默认值为serving_server | + | `serving_client` | str | `"serving_client"` | 转换后的客户端配置文件存储路径。默认值为serving_client | + +- 下载并解压已经构建后完成的检索库 index + ```shell + # 回到deploy目录 + cd ../ + # 下载构建完成的检索库 index + wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar + # 解压构建完成的检索库 index + tar -xf drink_dataset_v1.0.tar + ``` + +### 3.2 服务部署和请求 + +**注意:** 识别服务涉及到多个模型,出于性能考虑采用 PipeLine 部署方式。Pipeline 部署方式当前不支持 windows 平台。 +- 进入到工作目录 + ```shell + cd ./deploy/paddleserving/recognition + ``` + paddleserving 目录包含启动 Python Pipeline 服务、C++ Serving 服务和发送预测请求的代码,包括: + ```shell + __init__.py + config.yml # 启动python pipeline服务的配置文件 + pipeline_http_client.py # http方式发送pipeline预测请求的脚本 + pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 + recognition_web_service.py # 启动pipeline服务端的脚本 + readme.md # 识别模型服务化部署文档 + run_cpp_serving.sh # 启动C++ Pipeline Serving部署的脚本 + test_cpp_serving_client.py # rpc方式发送C++ Pipeline serving预测请求的脚本 + ``` + + +#### 3.2.1 Python Serving + +- 启动服务: + ```shell + # 启动服务,运行日志保存在 log.txt + python3.7 recognition_web_service.py &>log.txt & + ``` + +- 发送请求: + ```shell + python3.7 pipeline_http_client.py + ``` + 成功运行后,模型预测的结果会打印在客户端中,如下所示: + ```log + {'err_no': 0, 'err_msg': '', 'key': ['result'], 'value': ["[{'bbox': [345, 95, 524, 576], 'rec_docs': '红牛-强化型', 'rec_scores': 0.79903316}]"], 'tensors': []} + ``` + + +#### 3.2.2 C++ Serving + +与Python Serving不同,C++ Serving客户端调用 C++ OP来预测,因此在启动服务之前,需要编译并安装 serving server包,并设置 `SERVING_BIN`。 +- 编译并安装Serving server包 + ```shell + # 进入工作目录 + cd PaddleClas/deploy/paddleserving + # 一键编译安装Serving server、设置 SERVING_BIN + source ./build_server.sh python3.7 + ``` + **注:**[build_server.sh](../build_server.sh#L55-L62)所设定的路径可能需要根据实际机器上的环境如CUDA、python版本等作一定修改,然后再编译。 + +- C++ Serving使用的输入输出格式与Python不同,因此需要执行以下命令,将4个文件复制到下的文件覆盖掉[3.1](#31-模型转换)得到文件夹中的对应4个prototxt文件。 + ```shell + # 进入PaddleClas/deploy目录 + cd PaddleClas/deploy/ + + # 覆盖prototxt文件 + \cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt 
./models/general_PPLCNet_x2_5_lite_v1.0_serving/ + \cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ./models/general_PPLCNet_x2_5_lite_v1.0_client/ + \cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/ + \cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ + ``` + +- 启动服务: + ```shell + # 进入工作目录 + cd PaddleClas/deploy/paddleserving/recognition + + # 端口号默认为9400;运行日志默认保存在 log_PPShiTu.txt 中 + # CPU部署 + bash run_cpp_serving.sh + # GPU部署,并指定第0号卡 + bash run_cpp_serving.sh 0 + ``` + +- 发送请求: + ```shell + # 发送服务请求 + python3.7 test_cpp_serving_client.py + ``` + 成功运行后,模型预测的结果会打印在客户端中,如下所示: + ```log + WARNING: Logging before InitGoogleLogging() is written to STDERR + I0614 03:01:36.273097 6084 naming_service_thread.cpp:202] brpc::policy::ListNamingService("127.0.0.1:9400"): added 1 + I0614 03:01:37.393564 6084 general_model.cpp:490] [client]logid=0,client_cost=1107.82ms,server_cost=1101.75ms. + [{'bbox': [345, 95, 524, 585], 'rec_docs': '红牛-强化型', 'rec_scores': 0.8073724}] + ``` + +- 关闭服务 +如果服务程序在前台运行,可以按下`Ctrl+C`来终止服务端程序;如果在后台运行,可以使用kill命令关闭相关进程,也可以在启动服务程序的路径下执行以下命令来终止服务端程序: + ```bash + python3.7 -m paddle_serving_server.serve stop + ``` + 执行完毕后出现`Process stopped`信息表示成功关闭服务。 + + +## 4. FAQ + +**Q1**: 发送请求后没有结果返回或者提示输出解码报错 + +**A1**: 启动服务和发送请求时不要设置代理,可以在启动服务前和发送请求前关闭代理,关闭代理的命令是: +```shell +unset https_proxy +unset http_proxy +``` +**Q2**: 启动服务后没有任何反应 + +**A2**: 可以检查`config.yml`中`model_config`对应的路径是否存在,文件夹命名是否正确 + +更多的服务部署类型,如 `RPC 预测服务` 等,可以参考 Serving 的[github 官网](https://github.com/PaddlePaddle/Serving/tree/v0.9.0/examples) diff --git a/docs/zh_CN/inference_deployment/whl_deploy.md b/docs/zh_CN/inference_deployment/whl_deploy.md index 14582ace5ce13636c7c14e7fdb9ba9ad2ebbfe90..e6ad70904853d17f89974ff62b812a3420d21a2b 100644 --- a/docs/zh_CN/inference_deployment/whl_deploy.md +++ b/docs/zh_CN/inference_deployment/whl_deploy.md @@ -18,7 +18,7 @@ PaddleClas 支持 Python Whl 包方式进行预测,目前 Whl 包方式仅支 - [4.6 对 `NumPy.ndarray` 格式数据进行预测](#4.6) - [4.7 保存预测结果](#4.7) - [4.8 指定 label name](#4.8) - + ## 1. 安装 paddleclas @@ -212,14 +212,14 @@ print(next(result)) ```python from paddleclas import PaddleClas clas = PaddleClas(model_name='ResNet50', save_dir='./output_pre_label/') -infer_imgs = 'docs/images/whl/' # it can be infer_imgs folder path which contains all of images you want to predict. +infer_imgs = 'docs/images/' # it can be infer_imgs folder path which contains all of images you want to predict. result=clas.predict(infer_imgs) print(next(result)) ``` * CLI ```bash -paddleclas --model_name='ResNet50' --infer_imgs='docs/images/whl/' --save_dir='./output_pre_label/' +paddleclas --model_name='ResNet50' --infer_imgs='docs/images/' --save_dir='./output_pre_label/' ``` diff --git a/docs/zh_CN/installation/install_paddle.md b/docs/zh_CN/installation/install_paddle.md deleted file mode 100644 index 995d28797c3078956af5571ef11506c2028481e4..0000000000000000000000000000000000000000 --- a/docs/zh_CN/installation/install_paddle.md +++ /dev/null @@ -1,101 +0,0 @@ -# 安装 PaddlePaddle - ---- -## 目录 - -- [1. 环境要求](#1) -- [2.(建议)使用 Docker 环境](#2) -- [3. 通过 pip 安装 PaddlePaddle](#3) -- [4. 
验证安装](#4) - -目前,**PaddleClas** 要求 **PaddlePaddle** 版本 `>=2.0`。建议使用我们提供的 Docker 运行 PaddleClas,有关 Docker、nvidia-docker 的相关使用教程可以参考[链接](https://www.runoob.com/Docker/Docker-tutorial.html)。如果不使用 Docker,可以直接跳过 [2.(建议)使用 Docker 环境](#2) 部分内容,从 [3. 通过 pip 安装 PaddlePaddle](#3) 部分开始。 - - - -## 1. 环境要求 - -**版本要求**: -- python 3.x -- CUDA >= 10.1(如果使用 `paddlepaddle-gpu`) -- cuDNN >= 7.6.4(如果使用 `paddlepaddle-gpu`) -- nccl >= 2.1.2(如果使用分布式训练/评估) -- gcc >= 8.2 - -**建议**: -* 当 CUDA 版本为 10.1 时,显卡驱动版本 `>= 418.39`; -* 当 CUDA 版本为 10.2 时,显卡驱动版本 `>= 440.33`; -* 更多 CUDA 版本与要求的显卡驱动版本可以参考[链接](https://docs.nvidia.com/deploy/cuda-compatibility/index.html)。 - - - -## 2.(建议)使用 Docker 环境 - -* 切换到工作目录下 - -```shell -cd /home/Projects -``` - -* 创建 docker 容器 - -下述命令会创建一个名为 ppcls 的 Docker 容器,并将当前工作目录映射到容器内的 `/paddle` 目录。 - -```shell -# 对于 GPU 用户 -sudo nvidia-docker run --name ppcls -v $PWD:/paddle --shm-size=8G --network=host -it paddlepaddle/paddle:2.1.0-gpu-cuda10.2-cudnn7 /bin/bash - -# 对于 CPU 用户 -sudo docker run --name ppcls -v $PWD:/paddle --shm-size=8G --network=host -it paddlepaddle/paddle:2.1.0 /bin/bash -``` - -**注意**: -* 首次使用该镜像时,下述命令会自动下载该镜像文件,下载需要一定的时间,请耐心等待; -* 上述命令会创建一个名为 ppcls 的 Docker 容器,之后再次使用该容器时无需再次运行该命令; -* 参数 `--shm-size=8G` 将设置容器的共享内存为 8 G,如机器环境允许,建议将该参数设置较大,如 `64G`; -* 您也可以访问 [DockerHub](https://hub.Docker.com/r/paddlepaddle/paddle/tags/) 获取与您机器适配的镜像; -* 退出/进入 docker 容器: - * 在进入 Docker 容器后,可使用组合键 `Ctrl + P + Q` 退出当前容器,同时不关闭该容器; - * 如需再次进入容器,可使用下述命令: - - ```shell - sudo Docker exec -it ppcls /bin/bash - ``` - - - -## 3. 通过 pip 安装 PaddlePaddle - -可运行下面的命令,通过 pip 安装最新版本 PaddlePaddle: - -```bash -# 对于 CPU 用户 -pip install paddlepaddle --upgrade -i https://mirror.baidu.com/pypi/simple - -# 对于 GPU 用户 -pip install paddlepaddle-gpu --upgrade -i https://mirror.baidu.com/pypi/simple -``` - -**注意:** -* 如果先安装了 CPU 版本的 PaddlePaddle,之后想切换到 GPU 版本,那么需要使用 pip 先卸载 CPU 版本的 PaddlePaddle,再安装 GPU 版本的 PaddlePaddle,否则容易导致 PaddlePaddle 冲突。 -* 您也可以从源码编译安装 PaddlePaddle,请参照 [PaddlePaddle 安装文档](http://www.paddlepaddle.org.cn/install/quick) 中的说明进行操作。 - - -## 4. 验证安装 - -使用以下命令可以验证 PaddlePaddle 是否安装成功。 - -```python -import paddle -paddle.utils.run_check() -``` - -查看 PaddlePaddle 版本的命令如下: - -```bash -python -c "import paddle; print(paddle.__version__)" -``` - -**注意**: -- 从源码编译的 PaddlePaddle 版本号为 `0.0.0`,请确保使用 PaddlePaddle 2.0 及之后的源码进行编译; -- PaddleClas 基于 PaddlePaddle 高性能的分布式训练能力,若您从源码编译,请确保打开编译选项 `WITH_DISTRIBUTE=ON`。具体编译选项参考 [编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#bianyixuanxiangbiao); -- 在 Docker 中运行时,为保证 Docker 容器有足够的共享内存用于 Paddle 的数据读取加速,在创建 Docker 容器时,请设置参数 `--shm-size=8g`,条件允许的话可以设置为更大的值。 diff --git a/docs/zh_CN/installation/install_paddleclas.md b/docs/zh_CN/installation/install_paddleclas.md index 0f70bf2364589dbe85bf09128fc034d9d250d22b..e02acc6fdae4f211c07232489d07b31bd187da1d 100644 --- a/docs/zh_CN/installation/install_paddleclas.md +++ b/docs/zh_CN/installation/install_paddleclas.md @@ -1,29 +1,94 @@ -# 安装 PaddleClas +# 环境准备 --- ## 目录 -* [1. 克隆 PaddleClas](#1) -* [2. 安装 Python 依赖库](#2) +- [1. 安装 PaddlePaddle](#1) + - [1.1 使用Paddle官方镜像](#1.1) + - [1.2 在现有环境中安装paddle](#1.2) + - [1.3 安装验证](#1.3) +- [2. 克隆 PaddleClas](#2) +- [3. 安装 Python 依赖库](#3) +### 1.安装PaddlePaddle +目前,**PaddleClas** 要求 **PaddlePaddle** 版本 `>=2.3`。 +建议使用Paddle官方提供的 Docker 镜像运行 PaddleClas,有关 Docker、nvidia-docker 的相关使用教程可以参考[链接](https://www.runoob.com/Docker/Docker-tutorial.html)。 -## 1. 
克隆 PaddleClas + + +#### 1.1(建议)使用 Docker 环境 + +* 切换到工作目录下,例如工作目录为`/home/Projects`,则运行命令: + +```shell +cd /home/Projects +``` + +* 创建 docker 容器 + +下述命令会创建一个名为 ppcls 的 Docker 容器,并将当前工作目录映射到容器内的 `/paddle` 目录。 + +```shell +# 对于 GPU 用户 +sudo nvidia-docker run --name ppcls -v $PWD:/paddle --shm-size=8G --network=host -it registry.baidubce.com/paddlepaddle/paddle:2.3.0-gpu-cuda10.2-cudnn7 /bin/bash + +# 对于 CPU 用户 +sudo docker run --name ppcls -v $PWD:/paddle --shm-size=8G --network=host -it paddlepaddle/paddle:2.3.0-gpu-cuda10.2-cudnn7 /bin/bash +``` + +**注意**: +* 首次使用该镜像时,下述命令会自动下载该镜像文件,下载需要一定的时间,请耐心等待; +* 上述命令会创建一个名为 ppcls 的 Docker 容器,之后再次使用该容器时无需再次运行该命令; +* 参数 `--shm-size=8G` 将设置容器的共享内存为 8 G,如机器环境允许,建议将该参数设置较大,如 `64G`; +* 您也可以访问 [DockerHub](https://hub.Docker.com/r/paddlepaddle/paddle/tags/) ,手动选择需要的镜像; +* 退出/进入 docker 容器: + * 在进入 Docker 容器后,可使用组合键 `Ctrl + P + Q` 退出当前容器,同时不关闭该容器; + * 如需再次进入容器,可使用下述命令: + + ```shell + sudo Docker exec -it ppcls /bin/bash + ``` + +#### 1.2 在现有环境中安装paddle +您也可以用pip或conda直接安装paddle,详情请参考官方文档中的[快速安装](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/docker/linux-docker.html)部分。 + +#### 1.3 安装验证 +使用以下命令可以验证 PaddlePaddle 是否安装成功。 +```python +import paddle +paddle.utils.run_check() +``` +查看 PaddlePaddle 版本的命令如下: + +```bash +python -c "import paddle; print(paddle.__version__)" +``` + +**注意**: +- 从源码编译的 PaddlePaddle 版本号为 `0.0.0`,请确保使用 PaddlePaddle 2.3 及之后的源码进行编译; +- PaddleClas 基于 PaddlePaddle 高性能的分布式训练能力,若您从源码编译,请确保打开编译选项 `WITH_DISTRIBUTE=ON`。具体编译选项参考 [编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#bianyixuanxiangbiao); +- 在 Docker 中运行时,为保证 Docker 容器有足够的共享内存用于 Paddle 的数据读取加速,在创建 Docker 容器时,请设置参数 `--shm-size=8g`,条件允许的话可以设置为更大的值。 + + + + +### 2. 克隆 PaddleClas 从 GitHub 下载: ```shell -git clone https://github.com/PaddlePaddle/PaddleClas.git -b release/2.3 +git clone https://github.com/PaddlePaddle/PaddleClas.git -b release/2.4 ``` 如果访问 GitHub 网速较慢,可以从 Gitee 下载,命令如下: ```shell -git clone https://gitee.com/paddlepaddle/PaddleClas.git -b release/2.3 +git clone https://gitee.com/paddlepaddle/PaddleClas.git -b release/2.4 ``` - + -## 2. 安装 Python 依赖库 +### 3. 安装 Python 依赖库 PaddleClas 的 Python 依赖库在 `requirements.txt` 中给出,可通过如下命令安装: diff --git a/docs/zh_CN/models/MobileViT.md b/docs/zh_CN/models/MobileViT.md index 2980fb38f80c412a18b73674eac7cd3fd7793ce5..8d225c58a9fe604f395c7620357e765954378328 100644 --- a/docs/zh_CN/models/MobileViT.md +++ b/docs/zh_CN/models/MobileViT.md @@ -17,6 +17,6 @@ MobileViT 是一个轻量级的视觉 Transformer 网络,可以用作计算机 | Models | Top1 | Top5 | Reference
top1 | Reference
top5 | FLOPs
(M) | Params
(M) | |:--:|:--:|:--:|:--:|:--:|:--:|:--:| -| MobileViT_XXS | 0.6867 | 0.8878 | 0.690 | - | 1849.35 | 5.59 | +| MobileViT_XXS | 0.6867 | 0.8878 | 0.690 | - | 337.24 | 1.28 | | MobileViT_XS | 0.7454 | 0.9227 | 0.747 | - | 930.75 | 2.33 | -| MobileViT_S | 0.7814 | 0.9413 | 0.783 | - | 337.24 | 1.28 | +| MobileViT_S | 0.7814 | 0.9413 | 0.783 | - | 1849.35 | 5.59 | diff --git a/docs/zh_CN/models/PP-HGNet.md b/docs/zh_CN/models/PP-HGNet.md new file mode 100644 index 0000000000000000000000000000000000000000..f89c11c84b20723a84f98754d090ea1119931e92 --- /dev/null +++ b/docs/zh_CN/models/PP-HGNet.md @@ -0,0 +1,407 @@ +# PP-HGNet 系列 +--- +- [1. 模型介绍](#1) + - [1.1 模型简介](#1.1) + - [1.2 模型细节](#1.2) + - [1.3 实验结果](#1.3) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型推理部署](#4) + - [4.1 推理模型准备](#4.1) + - [4.1.1 基于训练得到的权重导出 inference 模型](#4.1.1) + - [4.1.2 直接下载 inference 模型](#4.1.2) + - [4.2 基于 Python 预测引擎推理](#4.2) + - [4.2.1 预测单张图像](#4.2.1) + - [4.2.2 基于文件夹的批量预测](#4.2.2) + - [4.3 基于 C++ 预测引擎推理](#4.3) + - [4.4 服务化部署](#4.4) + - [4.5 端侧部署](#4.5) + - [4.6 Paddle2ONNX 模型转换与预测](#4.6) + + + +## 1. 模型介绍 + + + +### 1.1 模型简介 + +PP-HGNet(High Performance GPU Net) 是百度飞桨视觉团队自研的更适用于 GPU 平台的高性能骨干网络,该网络在 VOVNet 的基础上使用了可学习的下采样层(LDS Layer),融合了 ResNet_vd、PPHGNet 等模型的优点,该模型在 GPU 平台上与其他 SOTA 模型在相同的速度下有着更高的精度。在同等速度下,该模型高于 ResNet34-D 模型 3.8 个百分点,高于 ResNet50-D 模型 2.4 个百分点,在使用百度自研 SSLD 蒸馏策略后,超越 ResNet50-D 模型 4.7 个百分点。与此同时,在相同精度下,其推理速度也远超主流 VisionTransformer 的推理速度。 + + + +### 1.2 模型细节 + +PP-HGNet 作者针对 GPU 设备,对目前 GPU 友好的网络做了分析和归纳,尽可能多的使用 3x3 标准卷积(计算密度最高)。在此将 VOVNet 作为基准模型,将主要的有利于 GPU 推理的改进点进行融合。从而得到一个有利于 GPU 推理的骨干网络,同样速度下,精度大幅超越其他 CNN 或者 VisionTransformer 模型。 + +PP-HGNet 骨干网络的整体结构如下: + +![](../../images/PP-HGNet/PP-HGNet.png) + +其中,PP-HGNet是由多个HG-Block组成,HG-Block的细节如下: + +![](../../images/PP-HGNet/PP-HGNet-block.png) + + + +### 1.3 实验结果 + +PP-HGNet 目前提供的模型的精度、速度指标及预训练权重链接如下: + +| Model | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) | 预训练模型下载地址 | inference模型下载地址 | +|:--: |:--: |:--: |:--: | :--: |:--: | +| PPHGNet_tiny | 79.83 | 95.04 | 1.77 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar) | +| PPHGNet_tiny_ssld | 81.95 | 96.12 | 1.77 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_ssld_infer.tar) | +| PPHGNet_small | 81.51| 95.82 | 2.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar) | +| PPHGNet_small_ssld | 83.82| 96.81 | 2.52 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_ssld_infer.tar) | +| PPHGNet_base_ssld | 85.00| 97.35 | 5.97 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_base_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_base_ssld_infer.tar) | + +**备注:** + +* 1. 
`_ssld` 表示使用 `SSLD 蒸馏`后的模型。关于 `SSLD蒸馏` 的内容,详情 [SSLD 蒸馏](../advanced_tutorials/knowledge_distillation.md)。 +* 2. PP-HGNet 更多模型指标及权重,敬请期待。 + +PP-HGNet 与其他模型的比较如下,其中测试机器为 NVIDIA® Tesla® V100,开启 TensorRT 引擎,精度类型为 FP32。在相同速度下,PP-HGNet 精度均超越了其他 SOTA CNN 模型,在与 SwinTransformer 模型的比较中,在更高精度的同时,速度快 2 倍以上。 + +| Model | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) | +|:--: |:--: |:--: |:--: | +| ResNet34 | 74.57 | 92.14 | 1.97 | +| ResNet34_vd | 75.98 | 92.98 | 2.00 | +| EfficientNetB0 | 77.38 | 93.31 | 1.96 | +| PPHGNet_tiny | 79.83 | 95.04 | 1.77 | +| PPHGNet_tiny_ssld | 81.95 | 96.12 | 1.77 | +| ResNet50 | 76.50 | 93.00 | 2.54 | +| ResNet50_vd | 79.12 | 94.44 | 2.60 | +| ResNet50_rsb | 80.40 | | 2.54 | +| EfficientNetB1 | 79.15 | 94.41 | 2.88 | +| SwinTransformer_tiny | 81.2 | 95.5 | 6.59 | +| PPHGNet_small | 81.51| 95.82 | 2.52 | +| PPHGNet_small_ssld | 83.82| 96.81 | 2.52 | +| Res2Net200_vd_26w_4s_ssld| 85.13 | 97.42 | 11.45 | +| ResNeXt101_32x48d_wsl | 85.37 | 97.69 | 55.07 | +| SwinTransformer_base | 85.2 | 97.5 | 13.53 | +| PPHGNet_base_ssld | 85.00| 97.35 | 5.97 | + + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +* 在命令行中使用 PPHGNet_small 的权重快速预测 + +```bash +paddleclas --model_name=PPHGNet_small --infer_imgs="docs/images/inference_deployment/whl_demo.jpg" +``` + +结果如下: +``` +>>> result +class_ids: [8, 7, 86, 82, 81], scores: [0.71479, 0.08682, 0.00806, 0.0023, 0.00121], label_names: ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'ptarmigan'], filename: docs/images/inference_deployment/whl_demo.jpg +Predict complete! +``` + +**备注**: 更换 PPHGNet 的其他 scale 的模型时,只需替换 `model_name`,如将此时的模型改为 `PPHGNet_tiny` 时,只需要将 `--model_name=PPHGNet_small` 改为 `--model_name=PPHGNet_tiny` 即可。 + + +* 在 Python 代码中预测 +```python +from paddleclas import PaddleClas +clas = PaddleClas(model_name='PPHGNet_small') +infer_imgs = 'docs/images/inference_deployment/whl_demo.jpg' +result = clas.predict(infer_imgs) +print(next(result)) +``` + +**备注**:`PaddleClas.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭 +代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果。返回结果示例如下: + +``` +>>> result +[{'class_ids': [8, 7, 86, 82, 81], 'scores': [0.71479, 0.08682, 0.00806, 0.0023, 0.00121], 'label_names': ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'ptarmigan'], 'filename': 'docs/images/inference_deployment/whl_demo.jpg'}] +``` + + + + +## 3. 模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档[环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + +请在[ImageNet 官网](https://www.image-net.org/)准备 ImageNet-1k 相关的数据。 + + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,将下载好的数据命名为 `ILSVRC2012` ,存放于此。 `ILSVRC2012` 目录中具有以下数据: + +``` +├── train +│   ├── n01440764 +│   │   ├── n01440764_10026.JPEG +│   │   ├── n01440764_10027.JPEG +├── train_list.txt +... 
+├── val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +├── val_list.txt +``` + +其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml` 中提供了 PPHGNet_small 训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml +``` + + +**备注:** + +* 当前精度最佳的模型会保存在 `output/PPHGNet_small/best_model.pdparams` + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml \ + -o Global.pretrained_model=output/PPHGNet_small/best_model +``` + +其中 `-o Global.pretrained_model="output/PPHGNet_small/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml \ + -o Global.pretrained_model=output/PPHGNet_small/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [8, 7, 86, 82, 81], 'scores': [0.71479, 0.08682, 0.00806, 0.0023, 0.00121], 'file_name': 'docs/images/inference_deployment/whl_demo.jpg', 'label_names': ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'ptarmigan']}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPHGNet_small/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `docs/images/inference_deployment/whl_demo.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + +* 默认输出的是 Top-5 的值,如果希望输出 Top-k 的值,可以指定`-o Infer.PostProcess.topk=k`,其中,`k` 为您指定的值。 + + + + + +## 4. 
模型推理部署 + + + +### 4.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + + +### 4.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml \ + -o Global.pretrained_model=output/PPHGNet_small/best_model \ + -o Global.save_inference_dir=deploy/models/PPHGNet_small_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPHGNet_small_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPHGNet_small_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + + +### 4.1.2 直接下载 inference 模型 + +[4.1.1 小节](#4.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar && tar -xf PPHGNet_small_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── PPHGNet_small_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 4.2 基于 Python 预测引擎推理 + + + + +#### 4.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/ImageNet/ILSVRC2012_val_00000010.jpeg` 进行分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPHGNet_small_infer +# 使用下面的命令使用 CPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPHGNet_small_infer -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [332, 153, 283, 338, 204], score(s): [0.50, 0.05, 0.02, 0.01, 0.01], label_name(s): ['Angora, Angora rabbit', 'Maltese dog, Maltese terrier, Maltese', 'Persian cat', 'guinea pig, Cavia cobaya', 'Lhasa, Lhasa apso'] +``` + + + +#### 4.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPHGNet_small_infer -o Global.infer_imgs=images/ImageNet/ +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [332, 153, 283, 338, 204], score(s): [0.50, 0.05, 0.02, 0.01, 0.01], label_name(s): ['Angora, Angora rabbit', 'Maltese dog, Maltese terrier, Maltese', 'Persian cat', 'guinea pig, Cavia cobaya', 'Lhasa, Lhasa apso'] +ILSVRC2012_val_00010010.jpeg: class id(s): [626, 622, 531, 487, 633], score(s): [0.68, 0.02, 0.02, 0.02, 0.02], label_name(s): ['lighter, light, igniter, ignitor', 'lens cap, lens cover', 'digital watch', 'cellular telephone, cellular phone, cellphone, cell, mobile phone', "loupe, jeweler's loupe"] +ILSVRC2012_val_00020010.jpeg: class id(s): [178, 211, 171, 246, 741], score(s): [0.82, 0.00, 0.00, 0.00, 0.00], label_name(s): ['Weimaraner', 'vizsla, Hungarian pointer', 'Italian greyhound', 'Great Dane', 'prayer rug, prayer mat'] +ILSVRC2012_val_00030010.jpeg: class id(s): [80, 83, 136, 23, 93], score(s): [0.84, 0.00, 0.00, 0.00, 0.00], label_name(s): ['black grouse', 'prairie chicken, prairie grouse, 
prairie fowl', 'European gallinule, Porphyrio porphyrio', 'vulture', 'hornbill'] +``` + + + + +### 4.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 4.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 4.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 4.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 + diff --git a/docs/zh_CN/models/PP-LCNet.md b/docs/zh_CN/models/PP-LCNet.md index 7fea973d4634228fefcccff0e7e856546b3b9652..2df3c3e297f3f20cb3ffca62c67397f61364de3f 100644 --- a/docs/zh_CN/models/PP-LCNet.md +++ b/docs/zh_CN/models/PP-LCNet.md @@ -3,54 +3,77 @@ ## 目录 -- [1. 摘要](#1) -- [2. 介绍](#2) -- [3. 方法](#3) - - [3.1 更好的激活函数](#3.1) - - [3.2 合适的位置添加 SE 模块](#3.2) - - [3.3 合适的位置添加更大的卷积核](#3.3) - - [3.4 GAP 后使用更大的 1x1 卷积层](#3.4) -- [4. 实验部分](#4) - - [4.1 图像分类](#4.1) - - [4.2 目标检测](#4.2) - - [4.3 语义分割](#4.3) -- [5. 基于 V100 GPU 的预测速度](#5) -- [6. 基于 SD855 的预测速度](#6) -- [7. 总结](#7) -- [8. 引用](#8) +- [1. 模型介绍](#1) + - [1.1 模型简介](#1.1) + - [1.2 模型细节](#1.2) + - [1.2.1 更好的激活函数](#1.2.1) + - [1.2.2 合适的位置添加 SE 模块](#1.2.2) + - [1.2.3 合适的位置添加更大的卷积核](#1.2.3) + - [1.2.4 GAP 后使用更大的 1x1 卷积层](#1.2.4) + - [1.3 实验结果](#1.3) + - [1.4 Benchmark](#1.4) + - [1.4.1 基于 Intel Xeon Gold 6148 的预测速度](#1.4.1) + - [1.4.2 基于 V100 GPU 的预测速度](#1.4.2) + - [1.4.3 基于 SD855 的预测速度](#1.4.3) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型推理部署](#4) + - [4.1 推理模型准备](#4.1) + - [4.1.1 基于训练得到的权重导出 inference 模型](#4.1.1) + - [4.1.2 直接下载 inference 模型](#4.1.2) + - [4.2 基于 Python 预测引擎推理](#4.2) + - [4.2.1 预测单张图像](#4.2.1) + - [4.2.2 基于文件夹的批量预测](#4.2.2) + - [4.3 基于 C++ 预测引擎推理](#4.3) + - [4.4 服务化部署](#4.4) + - [4.5 端侧部署](#4.5) + - [4.6 Paddle2ONNX 模型转换与预测](#4.6) +- [5. 引用](#5) + + -## 1. 摘要 +## 1. 模型介绍 -在计算机视觉领域中,骨干网络的好坏直接影响到整个视觉任务的结果。在之前的一些工作中,相关的研究者普遍将 FLOPs 或者 Params 作为优化目的,但是在工业界真实落地的场景中,推理速度才是考量模型好坏的重要指标,然而,推理速度和准确性很难兼得。考虑到工业界有很多基于 Intel CPU 的应用,所以我们本次的工作旨在使骨干网络更好的适应 Intel CPU,从而得到一个速度更快、准确率更高的轻量级骨干网络,与此同时,目标检测、语义分割等下游视觉任务的性能也同样得到提升。 +### 1.1 模型简介 - -## 2. 
介绍 +在计算机视觉领域中,骨干网络的好坏直接影响到整个视觉任务的结果。在之前的一些工作中,相关的研究者普遍将 FLOPs 或者 Params 作为优化目的,但是在工业界真实落地的场景中,推理速度才是考量模型好坏的重要指标,然而,推理速度和准确性很难兼得。考虑到工业界有很多基于 Intel CPU 的应用,所以我们本次的工作旨在使骨干网络更好的适应 Intel CPU,从而得到一个速度更快、准确率更高的轻量级骨干网络,与此同时,目标检测、语义分割等下游视觉任务的性能也同样得到提升。 近年来,有很多轻量级的骨干网络问世,尤其最近两年,各种 NAS 搜索出的网络层出不穷,这些网络要么主打 FLOPs 或者 Params 上的优势,要么主打 ARM 设备上的推理速度的优势,很少有网络专门针对 Intel CPU 做特定的优化,导致这些网络在 Intel CPU 端的推理速度并不是很完美。基于此,我们针对 Intel CPU 设备以及其加速库 MKLDNN 设计了特定的骨干网络 PP-LCNet,比起其他的轻量级的 SOTA 模型,该骨干网络可以在不增加推理时间的情况下,进一步提升模型的性能,最终大幅度超越现有的 SOTA 模型。与其他模型的对比图如下。 ![](../../images/PP-LCNet/PP-LCNet-Acc.png) - -## 3. 方法 + + +### 1.2 模型细节 网络结构整体如下图所示。 ![](../../images/PP-LCNet/PP-LCNet.png) 我们经过大量的实验发现,在基于 Intel CPU 设备上,尤其当启用 MKLDNN 加速库后,很多看似不太耗时的操作反而会增加延时,比如 elementwise-add 操作、split-concat 结构等。所以最终我们选用了结构尽可能精简、速度尽可能快的 block 组成我们的 BaseNet(类似 MobileNetV1)。基于 BaseNet,我们通过实验,总结了四条几乎不增加延时但是可以提升模型精度的方法,融合这四条策略,我们组合成了 PP-LCNet。下面对这四条策略一一介绍: - -### 3.1 更好的激活函数 + + +#### 1.2.1 更好的激活函数 自从卷积神经网络使用了 ReLU 激活函数后,网络性能得到了大幅度提升,近些年 ReLU 激活函数的变体也相继出现,如 Leaky-ReLU、P-ReLU、ELU 等,2017 年,谷歌大脑团队通过搜索的方式得到了 swish 激活函数,该激活函数在轻量级网络上表现优异,在 2019 年,MobileNetV3 的作者将该激活函数进一步优化为 H-Swish,该激活函数去除了指数运算,速度更快,网络精度几乎不受影响。我们也经过很多实验发现该激活函数在轻量级网络上有优异的表现。所以在 PP-LCNet 中,我们选用了该激活函数。 - -### 3.2 合适的位置添加 SE 模块 + + +#### 1.2.2 合适的位置添加 SE 模块 SE 模块是 SENet 提出的一种通道注意力机制,可以有效提升模型的精度。但是在 Intel CPU 端,该模块同样会带来较大的延时,如何平衡精度和速度是我们要解决的一个问题。虽然在 MobileNetV3 等基于 NAS 搜索的网络中对 SE 模块的位置进行了搜索,但是并没有得出一般的结论,我们通过实验发现,SE 模块越靠近网络的尾部对模型精度的提升越大。下表也展示了我们的一些实验结果: | SE Location | Top-1 Acc(\%) | Latency(ms) | -|-------------------|---------------|-------------| +|:--:|:--:|:--:| | 1100000000000 | 61.73 | 2.06 | | 0000001100000 | 62.17 | 2.03 | | 0000000000011 | 63.14 | 2.05 | @@ -59,13 +82,14 @@ SE 模块是 SENet 提出的一种通道注意力机制,可以有效提升模 最终,PP-LCNet 中的 SE 模块的位置选用了表格中第三行的方案。 - -### 3.3 合适的位置添加更大的卷积核 + + +#### 1.2.3 合适的位置添加更大的卷积核 在 MixNet 的论文中,作者分析了卷积核大小对模型性能的影响,结论是在一定范围内大的卷积核可以提升模型的性能,但是超过这个范围会有损模型的性能,所以作者组合了一种 split-concat 范式的 MixConv,这种组合虽然可以提升模型的性能,但是不利于推理。我们通过实验总结了一些更大的卷积核在不同位置的作用,类似 SE 模块的位置,更大的卷积核在网络的中后部作用更明显,下表展示了 5x5 卷积核的位置对精度的影响: | large-kernel Location | Top-1 Acc(\%) | Latency(ms) | -|-------------------|---------------|-------------| +|:--:|:--:|:--:| | 1111111111111 | 63.22 | 2.08 | | 1111111000000 | 62.70 | 2.07 | | 0000001111111 | 63.14 | 2.05 | @@ -73,48 +97,51 @@ SE 模块是 SENet 提出的一种通道注意力机制,可以有效提升模 实验表明,更大的卷积核放在网络的中后部即可达到放在所有位置的精度,与此同时,获得更快的推理速度。PP-LCNet 最终选用了表格中第三行的方案。 - -### 3.4 GAP 后使用更大的 1x1 卷积层 + + +#### 1.2.4 GAP 后使用更大的 1x1 卷积层 在 GoogLeNet 之后,GAP(Global-Average-Pooling)后往往直接接分类层,但是在轻量级网络中,这样会导致 GAP 后提取的特征没有得到进一步的融合和加工。如果在此后使用一个更大的 1x1 卷积层(等同于 FC 层),GAP 后的特征便不会直接经过分类层,而是先进行了融合,并将融合的特征进行分类。这样可以在不影响模型推理速度的同时大大提升准确率。 BaseNet 经过以上四个方面的改进,得到了 PP-LCNet。下表进一步说明了每个方案对结果的影响: | Activation | SE-block | Large-kernel | last-1x1-conv | Top-1 Acc(\%) | Latency(ms) | -|------------|----------|--------------|---------------|---------------|-------------| +|:--:|:--:|:--:|:--:|:--:|:--:| | 0 | 1 | 1 | 1 | 61.93 | 1.94 | | 1 | 0 | 1 | 1 | 62.51 | 1.87 | | 1 | 1 | 0 | 1 | 62.44 | 2.01 | | 1 | 1 | 1 | 0 | 59.91 | 1.85 | | 1 | 1 | 1 | 1 | 63.14 | 2.05 | - -## 4. 
实验部分 + + +### 1.3 实验结果 - -### 4.1 图像分类 + + +#### 1.3.1 图像分类 图像分类我们选用了 ImageNet 数据集,相比目前主流的轻量级网络,PP-LCNet 在相同精度下可以获得更快的推理速度。当使用百度自研的 SSLD 蒸馏策略后,精度进一步提升,在 Intel cpu 端约 5ms 的推理速度下 ImageNet 的 Top-1 Acc 超过了 80%。 -| Model | Params(M) | FLOPs(M) | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) | -|-------|-----------|----------|---------------|---------------|-------------| -| PPLCNet_x0_25 | 1.5 | 18 | 51.86 | 75.65 | 1.74 | -| PPLCNet_x0_35 | 1.6 | 29 | 58.09 | 80.83 | 1.92 | -| PPLCNet_x0_5 | 1.9 | 47 | 63.14 | 84.66 | 2.05 | -| PPLCNet_x0_75 | 2.4 | 99 | 68.18 | 88.30 | 2.29 | -| PPLCNet_x1_0 | 3.0 | 161 | 71.32 | 90.03 | 2.46 | -| PPLCNet_x1_5 | 4.5 | 342 | 73.71 | 91.53 | 3.19 | -| PPLCNet_x2_0 | 6.5 | 590 | 75.18 | 92.27 | 4.27 | -| PPLCNet_x2_5 | 9.0 | 906 | 76.60 | 93.00 | 5.39 | -| PPLCNet_x0_5_ssld | 1.9 | 47 | 66.10 | 86.46 | 2.05 | -| PPLCNet_x1_0_ssld | 3.0 | 161 | 74.39 | 92.09 | 2.46 | -| PPLCNet_x2_5_ssld | 9.0 | 906 | 80.82 | 95.33 | 5.39 | +| Model | Params(M) | FLOPs(M) | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) | 预训练模型下载地址 | inference模型下载地址 | +|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +| PPLCNet_x0_25 | 1.5 | 18 | 51.86 | 75.65 | 1.74 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar) | +| PPLCNet_x0_35 | 1.6 | 29 | 58.09 | 80.83 | 1.92 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar) | +| PPLCNet_x0_5 | 1.9 | 47 | 63.14 | 84.66 | 2.05 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar) | +| PPLCNet_x0_75 | 2.4 | 99 | 68.18 | 88.30 | 2.29 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar) | +| PPLCNet_x1_0 | 3.0 | 161 | 71.32 | 90.03 | 2.46 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar) | +| PPLCNet_x1_5 | 4.5 | 342 | 73.71 | 91.53 | 3.19 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar) | +| PPLCNet_x2_0 | 6.5 | 590 | 75.18 | 92.27 | 4.27 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar) | +| PPLCNet_x2_5 | 9.0 | 906 | 76.60 | 93.00 | 5.39 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar) | +| PPLCNet_x0_5_ssld | 1.9 | 47 | 66.10 | 86.46 | 2.05 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_ssld_pretrained.pdparams) | 
[下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_ssld_infer.tar) | +| PPLCNet_x1_0_ssld | 3.0 | 161 | 74.39 | 92.09 | 2.46 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_ssld_infer.tar) | +| PPLCNet_x2_5_ssld | 9.0 | 906 | 80.82 | 95.33 | 5.39 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_ssld_infer.tar) | 其中 `_ssld` 表示使用 `SSLD 蒸馏`后的模型。关于 `SSLD蒸馏` 的内容,详情 [SSLD 蒸馏](../advanced_tutorials/knowledge_distillation.md)。 与其他轻量级网络的性能对比: | Model | Params(M) | FLOPs(M) | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) | -|-------|-----------|----------|---------------|---------------|-------------| +|:--:|:--:|:--:|:--:|:--:|:--:| | MobileNetV2_x0_25 | 1.5 | 34 | 53.21 | 76.52 | 2.47 | | MobileNetV3_small_x0_35 | 1.7 | 15 | 53.03 | 76.37 | 3.02 | | ShuffleNetV2_x0_33 | 0.6 | 24 | 53.73 | 77.05 | 4.30 | @@ -128,50 +155,75 @@ BaseNet 经过以上四个方面的改进,得到了 PP-LCNet。下表进一步 | MobileNetV3_small_x1_25 | 3.6 | 100 | 70.67 | 89.51 | 3.95 | | PPLCNet_x1_0 | 3.0 | 161 | 71.32 | 90.03 | 2.46 | - -### 4.2 目标检测 + + +#### 1.3.2 目标检测 目标检测的方法我们选用了百度自研的 PicoDet,该方法主打轻量级目标检测场景,下表展示了在 COCO 数据集上、backbone 选用 PP-LCNet 与 MobileNetV3 的结果的比较,无论在精度还是速度上,PP-LCNet 的优势都非常明显。 | Backbone | mAP(%) | Latency(ms) | -|-------|-----------|----------| +|:--:|:--:|:--:| MobileNetV3_large_x0_35 | 19.2 | 8.1 | PPLCNet_x0_5 | 20.3 | 6.0 | MobileNetV3_large_x0_75 | 25.8 | 11.1 | PPLCNet_x1_0 | 26.9 | 7.9 | - -### 4.3 语义分割 + + +#### 1.3.3 语义分割 语义分割的方法我们选用了 DeeplabV3+,下表展示了在 Cityscapes 数据集上、backbone 选用 PP-LCNet 与 MobileNetV3 的比较,在精度和速度方面,PP-LCNet 的优势同样明显。 | Backbone | mIoU(%) | Latency(ms) | -|-------|-----------|----------| +|:--:|:--:|:--:| MobileNetV3_large_x0_5 | 55.42 | 135 | PPLCNet_x0_5 | 58.36 | 82 | MobileNetV3_large_x0_75 | 64.53 | 151 | PPLCNet_x1_0 | 66.03 | 96 | - + -## 5. 基于 V100 GPU 的预测速度 +## 1.4 Benchmark -| Models | Crop Size | Resize Short Size | FP32
Batch Size=1
(ms) | FP32
Batch Size=1\4
(ms) | FP32
Batch Size=8
(ms) | -| ------------- | --------- | ----------------- | ---------------------------- | -------------------------------- | ------------------------------ | -| PPLCNet_x0_25 | 224 | 256 | 0.72 | 1.17 | 1.71 | -| PPLCNet_x0_35 | 224 | 256 | 0.69 | 1.21 | 1.82 | -| PPLCNet_x0_5 | 224 | 256 | 0.70 | 1.32 | 1.94 | -| PPLCNet_x0_75 | 224 | 256 | 0.71 | 1.49 | 2.19 | -| PPLCNet_x1_0 | 224 | 256 | 0.73 | 1.64 | 2.53 | -| PPLCNet_x1_5 | 224 | 256 | 0.82 | 2.06 | 3.12 | -| PPLCNet_x2_0 | 224 | 256 | 0.94 | 2.58 | 4.08 | + + +#### 1.4.1 基于 Intel Xeon Gold 6148 的预测速度 + +| Model | Latency(ms)
bs=1, thread=10 | +|:--:|:--:| +| PPLCNet_x0_25 | 1.74 | +| PPLCNet_x0_35 | 1.92 | +| PPLCNet_x0_5 | 2.05 | +| PPLCNet_x0_75 | 2.29 | +| PPLCNet_x1_0 | 2.46 | +| PPLCNet_x1_5 | 3.19 | +| PPLCNet_x2_0 | 4.27 | +| PPLCNet_x2_5 | 5.39 | + +**备注:** 精度类型为 FP32,推理过程使用 MKLDNN。 - + + +#### 1.4.2 基于 V100 GPU 的预测速度 + +| Models | Latency(ms)
bs=1 | Latency(ms)
bs=4 | Latency(ms)
bs=8 | +| :--: | :--:| :--: | :--: | +| PPLCNet_x0_25 | 0.72 | 1.17 | 1.71 | +| PPLCNet_x0_35 | 0.69 | 1.21 | 1.82 | +| PPLCNet_x0_5 | 0.70 | 1.32 | 1.94 | +| PPLCNet_x0_75 | 0.71 | 1.49 | 2.19 | +| PPLCNet_x1_0 | 0.73 | 1.64 | 2.53 | +| PPLCNet_x1_5 | 0.82 | 2.06 | 3.12 | +| PPLCNet_x2_0 | 0.94 | 2.58 | 4.08 | + +**备注:** 精度类型为 FP32,推理过程使用 TensorRT。 + + -## 6. 基于 SD855 的预测速度 +#### 1.4.3 基于 SD855 的预测速度 -| Models | SD855 time(ms)
bs=1, thread=1 | SD855 time(ms)
bs=1, thread=2 | SD855 time(ms)
bs=1, thread=4 | -| ------------- | -------------------------------- | --------------------------------- | --------------------------------- | +| Models | Latency(ms)
bs=1, thread=1 | Latency(ms)
bs=1, thread=2 | Latency(ms)
bs=1, thread=4 | +| :--: | :--: | :--: | :--: | | PPLCNet_x0_25 | 2.30 | 1.62 | 1.32 | | PPLCNet_x0_35 | 3.15 | 2.11 | 1.64 | | PPLCNet_x0_5 | 4.27 | 2.73 | 1.92 | @@ -180,16 +232,326 @@ MobileNetV3_large_x0_75 | 64.53 | 151 | | PPLCNet_x1_5 | 20.55 | 12.26 | 7.54 | | PPLCNet_x2_0 | 33.79 | 20.17 | 12.10 | | PPLCNet_x2_5 | 49.89 | 29.60 | 17.82 | + +**备注:** 精度类型为 FP32。 - + + +## 2. 模型快速体验 -## 7. 总结 + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +* 在命令行中使用 PPLCNet_x1_0 的权重快速预测 + +```bash +paddleclas --model_name=PPLCNet_x1_0 --infer_imgs="docs/images/inference_deployment/whl_demo.jpg" +``` + +结果如下: +``` +>>> result +class_ids: [8, 7, 86, 81, 85], scores: [0.91347, 0.03779, 0.0036, 0.00117, 0.00112], label_names: ['hen', 'cock', 'partridge', 'ptarmigan', 'quail'], filename: docs/images/inference_deployment/whl_demo.jpg +Predict complete! +``` + +**备注**: 更换 PPLCNet 的其他 scale 的模型时,只需替换 `model_name`,如将此时的模型改为 `PPLCNet_x2_0` 时,只需要将 `--model_name=PPLCNet_x1_0` 改为 `--model_name=PPLCNet_x2_0` 即可。 + + +* 在 Python 代码中预测 +```python +from paddleclas import PaddleClas +clas = PaddleClas(model_name='PPLCNet_x1_0') +infer_imgs='docs/images/inference_deployment/whl_demo.jpg' +result=clas.predict(infer_imgs) +print(next(result)) +``` + +**备注**:`PaddleClas.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭 +代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果。返回结果示例如下: + +``` +>>> result +[{'class_ids': [8, 7, 86, 81, 85], 'scores': [0.91347, 0.03779, 0.0036, 0.00117, 0.00112], 'label_names': ['hen', 'cock', 'partridge', 'ptarmigan', 'quail'], 'filename': 'docs/images/inference_deployment/whl_demo.jpg'}] +``` + + + +## 3. 模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考 [Paddle 安装教程](../installation/install_paddle.md) 以及 [PaddleClas 安装教程](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + +请在[ImageNet 官网](https://www.image-net.org/)准备 ImageNet-1k 相关的数据。 + + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,将下载好的数据命名为 `ILSVRC2012` ,存放于此。 `ILSVRC2012` 目录中具有以下数据: + +``` +├── train +│   ├── n01440764 +│   │   ├── n01440764_10026.JPEG +│   │   ├── n01440764_10027.JPEG +├── train_list.txt +... 
+├── val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +├── val_list.txt +``` + +其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml` 中提供了 PPLCNet_x1_0 训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +``` + + +**备注:** + +* 当前精度最佳的模型会保存在 `output/PPLCNet_x1_0/best_model.pdparams` + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` -PP-LCNet 没有像学术界那样死扣极致的 FLOPs 与 Params,而是着眼于分析如何添加对 Intel CPU 友好的模块来提升模型的性能,这样可以更好的平衡准确率和推理时间,其中的实验结论也很适合其他网络结构设计的研究者,同时也为 NAS 搜索研究者提供了更小的搜索空间和一般结论。最终的 PP-LCNet 在产业界也可以更好的落地和应用。 +其中 `-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 - + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [8, 7, 86, 81, 85], 'scores': [0.91347, 0.03779, 0.0036, 0.00117, 0.00112], 'file_name': 'docs/images/inference_deployment/whl_demo.jpg', 'label_names': ['hen', 'cock', 'partridge', 'ptarmigan', 'quail']}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNet_x1_0/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `docs/images/inference_deployment/whl_demo.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + +* 默认输出的是 Top-5 的值,如果希望输出 Top-k 的值,可以指定`-o Infer.PostProcess.topk=k`,其中,`k` 为您指定的值。 + + + + + +## 4. 
模型推理部署 + + + +### 4.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + + +### 4.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml \ + -o Global.pretrained_model=output/PPLCNet_x1_0/best_model \ + -o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNet_x1_0_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + + +### 4.1.2 直接下载 inference 模型 + +[4.1.1 小节](#4.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar && tar -xf PPLCNet_x1_0_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNet_x1_0_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 4.2 基于 Python 预测引擎推理 + + + + +#### 4.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/ImageNet/ILSVRC2012_val_00000010.jpeg` 进行分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPLCNet_x1_0_infer +# 使用下面的命令使用 CPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPLCNet_x1_0_infer -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [153, 265, 204, 283, 229], score(s): [0.61, 0.11, 0.05, 0.03, 0.02], label_name(s): ['Maltese dog, Maltese terrier, Maltese', 'toy poodle', 'Lhasa, Lhasa apso', 'Persian cat', 'Old English sheepdog, bobtail'] +``` + + + +#### 4.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPLCNet_x1_0_infer -o Global.infer_imgs=images/ImageNet/ +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [153, 265, 204, 283, 229], score(s): [0.61, 0.11, 0.05, 0.03, 0.02], label_name(s): ['Maltese dog, Maltese terrier, Maltese', 'toy poodle', 'Lhasa, Lhasa apso', 'Persian cat', 'Old English sheepdog, bobtail'] +ILSVRC2012_val_00010010.jpeg: class id(s): [695, 551, 507, 531, 419], score(s): [0.11, 0.06, 0.03, 0.03, 0.03], label_name(s): ['padlock', 'face powder', 'combination lock', 'digital watch', 'Band Aid'] +ILSVRC2012_val_00020010.jpeg: class id(s): [178, 211, 209, 210, 236], score(s): [0.87, 0.03, 0.01, 0.00, 0.00], label_name(s): ['Weimaraner', 'vizsla, Hungarian pointer', 'Chesapeake Bay retriever', 'German short-haired pointer', 'Doberman, Doberman pinscher'] +ILSVRC2012_val_00030010.jpeg: class id(s): [80, 23, 93, 81, 99], score(s): [0.87, 0.01, 0.01, 0.01, 0.00], label_name(s): ['black grouse', 'vulture', 'hornbill', 'ptarmigan', 'goose'] +``` + + + + +### 4.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 
预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 4.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 4.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 4.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](@shuilong)来完成相应的部署工作。 + + + -## 8. 引用 +## 5. 引用 如果你的论文用到了 PP-LCNet 的方法,请添加如下 cite: ``` diff --git a/docs/zh_CN/models/PP-LCNetV2.md b/docs/zh_CN/models/PP-LCNetV2.md new file mode 100644 index 0000000000000000000000000000000000000000..01498478c1ee39fa651c1f6c6bd53a0b768fc241 --- /dev/null +++ b/docs/zh_CN/models/PP-LCNetV2.md @@ -0,0 +1,428 @@ +# PP-LCNetV2 + +--- + +## 目录 + +- [1. 模型介绍](#1) + - [1.1 模型简介](#1.1) + - [1.2 模型细节](#1.2) + - [1.2.1 Rep 策略](#1.2.1) + - [1.2.2 PW 卷积](#1.2.2) + - [1.2.3 Shortcut](#1.2.3) + - [1.2.4 激活函数](#1.2.4) + - [1.2.5 SE 模块](#1.2.5) + - [1.3 实验结果](#1.3) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型推理部署](#4) + - [4.1 推理模型准备](#4.1) + - [4.1.1 基于训练得到的权重导出 inference 模型](#4.1.1) + - [4.1.2 直接下载 inference 模型](#4.1.2) + - [4.2 基于 Python 预测引擎推理](#4.2) + - [4.2.1 预测单张图像](#4.2.1) + - [4.2.2 基于文件夹的批量预测](#4.2.2) + - [4.3 基于 C++ 预测引擎推理](#4.3) + - [4.4 服务化部署](#4.4) + - [4.5 端侧部署](#4.5) + - [4.6 Paddle2ONNX 模型转换与预测](#4.6) + + + +## 1. 
模型介绍 + + + +### 1.1 模型简介 + +骨干网络对计算机视觉下游任务的影响不言而喻,不仅对下游模型的性能影响很大,而且模型效率也极大地受此影响,但现有的大多骨干网络在真实应用中的效率并不理想,特别是缺乏针对 Intel CPU 平台所优化的骨干网络,我们测试了现有的主流轻量级模型,发现在 Intel CPU 平台上的效率并不理想,然而目前 Intel CPU 平台在工业界仍有大量使用场景,因此我们提出了 PP-LCNet 系列模型,PP-LCNetV2 是在 [PP-LCNetV1](./PP-LCNet.md) 基础上所改进的。 + + + +## 1.2 模型细节 + +![](../../images/PP-LCNetV2/net.png) + +PP-LCNetV2 模型的网络整体结构如上图所示。PP-LCNetV2 模型是在 PP-LCNetV1 的基础上优化而来,主要使用重参数化策略组合了不同大小卷积核的深度卷积,并优化了点卷积、Shortcut等。 + + + +### 1.2.1 Rep 策略 + +卷积核的大小决定了卷积层感受野的大小,通过组合使用不同大小的卷积核,能够获取不同尺度的特征,因此 PPLCNetV2 在 Stage3、Stage4 中,在同一层组合使用 kernel size 分别为 5、3、1 的 DW 卷积,同时为了避免对模型效率的影响,使用重参数化(Re parameterization,Rep)策略对同层的 DW 卷积进行融合,如下图所示。 + +![](../../images/PP-LCNetV2/rep.png) + + + +### 1.2.2 PW 卷积 + +深度可分离卷积通常由一层 DW 卷积和一层 PW 卷积组成,用以替换标准卷积,为了使深度可分离卷积具有更强的拟合能力,我们尝试使用两层 PW 卷积,同时为了控制模型效率不受影响,两层 PW 卷积设置为:第一个在通道维度对特征图压缩,第二个再通过放大还原特征图通道,如下图所示。通过实验发现,该策略能够显著提高模型性能,同时为了平衡对模型效率带来的影响,PPLCNetV2 仅在 Stage4、Stage5 中使用了该策略。 + +![](../../images/PP-LCNetV2/split_pw.png) + + + +### 1.2.3 Shortcut + +残差结构(residual)自提出以来,被诸多模型广泛使用,但在轻量级卷积神经网络中,由于残差结构所带来的元素级(element-wise)加法操作,会对模型的速度造成影响,我们在 PP-LCNetV2 中,以 Stage 为单位实验了 残差结构对模型的影响,发现残差结构的使用并非一定会带来性能的提高,因此 PPLCNetV2 仅在最后一个 Stage 中的使用了残差结构:在 Block 中增加 Shortcut,如下图所示。 + +![](../../images/PP-LCNetV2/shortcut.png) + + + +### 1.2.4 激活函数 + +在目前的轻量级卷积神经网络中,ReLU、Hard-Swish 激活函数最为常用,虽然在模型性能方面,Hard-Swish 通常更为优秀,然而我们发现部分推理平台对于 Hard-Swish 激活函数的效率优化并不理想,因此为了兼顾通用性,PP-LCNetV2 默认使用了 ReLU 激活函数,并且我们测试发现,ReLU 激活函数对于较大模型的性能影响较小。 + + + +### 1.2.5 SE 模块 + +虽然 SE 模块能够显著提高模型性能,但其对模型速度的影响同样不可忽视,在 PP-LCNetV1 中,我们发现在模型中后部使用 SE 模块能够获得最大化的收益。在 PP-LCNetV2 的优化过程中,我们以 Stage 为单位对 SE 模块的位置做了进一步实验,并发现在 Stage3 中使用能够取得更好的平衡。 + + + +## 1.3 实验结果 + +PPLCNetV2 目前提供的模型的精度、速度指标及预训练权重链接如下: + +| Model | Params(M) | FLOPs(M) | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) | 预训练模型下载地址 | inference模型下载地址 | +|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +| PPLCNetV2_base | 6.6 | 604 | 77.04 | 93.27 | 4.32 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar) | +| PPLCNetV2_base_ssld | 6.6 | 604 | 80.07 | 94.87 | 4.32 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_ssld_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_ssld_infer.tar) | + +**备注:** + +* 1. `_ssld` 表示使用 `SSLD 蒸馏`后的模型。关于 `SSLD蒸馏` 的内容,详情 [SSLD 蒸馏](../advanced_tutorials/knowledge_distillation.md)。 +* 2. PP-LCNetV2 更多模型指标及权重,敬请期待。 + +在不使用额外数据的前提下,PPLCNetV2_base 模型在图像分类 ImageNet 数据集上能够取得超过 77% 的 Top1 Acc,同时在 Intel CPU 平台的推理时间在 4.4 ms 以下,如下表所示,其中推理时间基于 Intel(R) Xeon(R) Gold 6271C CPU @ 2.60GHz 硬件平台,OpenVINO 推理平台。 + +| Model | Params(M) | FLOPs(M) | Top-1 Acc(\%) | Top-5 Acc(\%) | Latency(ms) | +|:--:|:--:|:--:|:--:|:--:|:--:| +| MobileNetV3_Large_x1_25 | 7.4 | 714 | 76.4 | 93.00 | 5.19 | +| PPLCNetV2_x2_5 | 9 | 906 | 76.60 | 93.00 | 7.25 | +| PPLCNetV2_base | 6.6 | 604 | 77.04 | 93.27 | 4.32 | +| PPLCNetV2_base_ssld | 6.6 | 604 | 80.07 | 94.87 | 4.32 | + + + + +## 2. 
模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + + +### 2.3 预测 + +* 在命令行中使用 PPLCNetV2_base 的权重快速预测 + +```bash +paddleclas --model_name=PPLCNetV2_base --infer_imgs="docs/images/inference_deployment/whl_demo.jpg" +``` + +结果如下: +``` +>>> result +class_ids: [8, 7, 86, 82, 83], scores: [0.8859, 0.07156, 0.00588, 0.00047, 0.00034], label_names: ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'prairie chicken, prairie grouse, prairie fowl'], filename: docs/images/inference_deployment/whl_demo.jpg +Predict complete +``` + + +* 在 Python 代码中预测 +```python +from paddleclas import PaddleClas +clas = PaddleClas(model_name='PPLCNetV2_base') +infer_imgs='docs/images/inference_deployment/whl_demo.jpg' +result=clas.predict(infer_imgs) +print(next(result)) +``` + +**备注**:`PaddleClas.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭 +代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果。返回结果示例如下: + +``` +>>> result +[{'class_ids': [8, 7, 86, 82, 83], 'scores': [0.8859, 0.07156, 0.00588, 0.00047, 0.00034], 'label_names': ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'prairie chicken, prairie grouse, prairie fowl'], 'filename': 'docs/images/inference_deployment/whl_demo.jpg'}] +``` + + + + +## 3. 模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考文档[环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + +请在[ImageNet 官网](https://www.image-net.org/)准备 ImageNet-1k 相关的数据。 + + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,将下载好的数据命名为 `ILSVRC2012` ,存放于此。 `ILSVRC2012` 目录中具有以下数据: + +``` +├── train +│   ├── n01440764 +│   │   ├── n01440764_10026.JPEG +│   │   ├── n01440764_10027.JPEG +├── train_list.txt +... 
+├── val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +├── val_list.txt +``` + +其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml` 中提供了 PPLCNetV2_base 训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +``` + + +**备注:** + +* 当前精度最佳的模型会保存在 `output/PPLCNetV2_base/best_model.pdparams` + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml \ + -o Global.pretrained_model=output/PPLCNetV2_base/best_model +``` + +其中 `-o Global.pretrained_model="output/PPLCNetV2_base/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml \ + -o Global.pretrained_model=output/PPLCNetV2_base/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [8, 7, 86, 82, 83], 'scores': [0.8859, 0.07156, 0.00588, 0.00047, 0.00034], 'file_name': 'docs/images/inference_deployment/whl_demo.jpg', 'label_names': ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'prairie chicken, prairie grouse, prairie fowl']}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/PPLCNetV2_base/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `docs/images/inference_deployment/whl_demo.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + +* 默认输出的是 Top-5 的值,如果希望输出 Top-k 的值,可以指定`-o Infer.PostProcess.topk=k`,其中,`k` 为您指定的值。 + + + + + +## 4. 
模型推理部署 + + + +### 4.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + + +### 4.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml \ + -o Global.pretrained_model=output/PPLCNetV2_base/best_model \ + -o Global.save_inference_dir=deploy/models/PPLCNetV2_base_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `PPLCNetV2_base_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNetV2_base_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + + +### 4.1.2 直接下载 inference 模型 + +[4.1.1 小节](#4.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar && tar -xf PPLCNetV2_base_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── PPLCNetV2_base_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 4.2 基于 Python 预测引擎推理 + + + + +#### 4.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/ImageNet/ILSVRC2012_val_00000010.jpeg` 进行分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPLCNetV2_base_infer +# 使用下面的命令使用 CPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPLCNetV2_base_infer -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [332, 153, 229, 204, 265], score(s): [0.28, 0.25, 0.03, 0.02, 0.02], label_name(s): ['Angora, Angora rabbit', 'Maltese dog, Maltese terrier, Maltese', 'Old English sheepdog, bobtail', 'Lhasa, Lhasa apso', 'toy poodle'] +``` + + + +#### 4.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/PPLCNetV2_base_infer -o Global.infer_imgs=images/ImageNet/ +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [332, 153, 229, 204, 265], score(s): [0.28, 0.25, 0.03, 0.02, 0.02], label_name(s): ['Angora, Angora rabbit', 'Maltese dog, Maltese terrier, Maltese', 'Old English sheepdog, bobtail', 'Lhasa, Lhasa apso', 'toy poodle'] +ILSVRC2012_val_00010010.jpeg: class id(s): [626, 531, 761, 487, 673], score(s): [0.64, 0.06, 0.03, 0.02, 0.01], label_name(s): ['lighter, light, igniter, ignitor', 'digital watch', 'remote control, remote', 'cellular telephone, cellular phone, cellphone, cell, mobile phone', 'mouse, computer mouse'] +ILSVRC2012_val_00020010.jpeg: class id(s): [178, 209, 246, 181, 211], score(s): [0.97, 0.00, 0.00, 0.00, 0.00], label_name(s): ['Weimaraner', 'Chesapeake Bay retriever', 'Great Dane', 'Bedlington terrier', 'vizsla, Hungarian pointer'] +ILSVRC2012_val_00030010.jpeg: class id(s): [80, 143, 81, 137, 98], score(s): [0.91, 0.01, 0.00, 0.00, 0.00], label_name(s): ['black grouse', 
'oystercatcher, oyster catcher', 'ptarmigan', 'American coot, marsh hen, mud hen, water hen, Fulica americana', 'red-breasted merganser, Mergus serrator' +``` + + + + +### 4.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 4.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 4.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 4.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../../deploy/paddle2onnx/readme.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/models/ResNet.md b/docs/zh_CN/models/ResNet.md new file mode 100644 index 0000000000000000000000000000000000000000..7a3f4f6335cab1fff2a4e46555af5e1461c41429 --- /dev/null +++ b/docs/zh_CN/models/ResNet.md @@ -0,0 +1,440 @@ +# ResNet 系列 +----- +## 目录 + +- [1. 模型介绍](#1) + - [1.1 模型简介](#1.1) + - [1.2 模型指标](#1.2) + - [1.3 Benchmark](#1.3) + - [1.3.1 基于 V100 GPU 的预测速度](#1.3.1) + - [1.3.2 基于 T4 GPU 的预测速度](#1.3.2) +- [2. 模型快速体验](#2) + - [2.1 安装 paddlepaddle](#2.1) + - [2.2 安装 paddleclas](#2.2) + - [2.3 预测](#2.3) +- [3. 模型训练、评估和预测](#3) + - [3.1 环境配置](#3.1) + - [3.2 数据准备](#3.2) + - [3.3 模型训练](#3.3) + - [3.4 模型评估](#3.4) + - [3.5 模型预测](#3.5) +- [4. 模型推理部署](#4) + - [4.1 推理模型准备](#4.1) + - [4.1.1 基于训练得到的权重导出 inference 模型](#4.1.1) + - [4.1.2 直接下载 inference 模型](#4.1.2) + - [4.2 基于 Python 预测引擎推理](#4.2) + - [4.2.1 预测单张图像](#4.2.1) + - [4.2.2 基于文件夹的批量预测](#4.2.2) + - [4.3 基于 C++ 预测引擎推理](#4.3) + - [4.4 服务化部署](#4.4) + - [4.5 端侧部署](#4.5) + - [4.6 Paddle2ONNX 模型转换与预测](#4.6) + + + +## 1. 模型介绍 + + + +### 1.1 模型简介 + +ResNet 系列模型是在 2015 年提出的,一举在 ILSVRC2015 比赛中取得冠军,top5 错误率为 3.57%。该网络创新性的提出了残差结构,通过堆叠多个残差结构从而构建了 ResNet 网络。实验表明使用残差块可以有效地提升收敛速度和精度。 + +斯坦福大学的 Joyce Xu 将 ResNet 称为「真正重新定义了我们看待神经网络的方式」的三大架构之一。由于 ResNet 卓越的性能,越来越多的来自学术界和工业界学者和工程师对其结构进行了改进,比较出名的有 Wide-ResNet, ResNet-vc, ResNet-vd, Res2Net 等,其中 ResNet-vc 与 ResNet-vd 的参数量和计算量与 ResNet 几乎一致,所以在此我们将其与 ResNet 统一归为 ResNet 系列。 + +PaddleClas 提供的 ResNet 系列的模型包括 ResNet50,ResNet50_vd,ResNet50_vd_ssld,ResNet200_vd 等 16 个预训练模型。在训练层面上,ResNet 的模型采用了训练 ImageNet 的标准训练流程,而其余改进版模型采用了更多的训练策略,如 learning rate 的下降方式采用了 cosine decay,引入了 label smoothing 的标签正则方式,在数据预处理加入了 mixup 的操作,迭代总轮数从 120 个 epoch 增加到 200 个 epoch。 + +其中,后缀使用`_ssld`的模型采用了 SSLD 知识蒸馏,保证模型结构不变的情况下,进一步提升了模型的精度。 + + + + +### 1.2 模型指标 + +| Models | Top1 | Top5 | Reference
top1 | Reference
top5 | FLOPs
(G) | Params
(M) | +|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +| ResNet18 | 0.710 | 0.899 | 0.696 | 0.891 | 3.660 | 11.690 | +| ResNet18_vd | 0.723 | 0.908 | | | 4.140 | 11.710 | +| ResNet34 | 0.746 | 0.921 | 0.732 | 0.913 | 7.360 | 21.800 | +| ResNet34_vd | 0.760 | 0.930 | | | 7.390 | 21.820 | +| ResNet34_vd_ssld | 0.797 | 0.949 | | | 7.390 | 21.820 | +| ResNet50 | 0.765 | 0.930 | 0.760 | 0.930 | 8.190 | 25.560 | +| ResNet50_vc | 0.784 | 0.940 | | | 8.670 | 25.580 | +| ResNet50_vd | 0.791 | 0.944 | 0.792 | 0.946 | 8.670 | 25.580 | +| ResNet101 | 0.776 | 0.936 | 0.776 | 0.938 | 15.520 | 44.550 | +| ResNet101_vd | 0.802 | 0.950 | | | 16.100 | 44.570 | +| ResNet152 | 0.783 | 0.940 | 0.778 | 0.938 | 23.050 | 60.190 | +| ResNet152_vd | 0.806 | 0.953 | | | 23.530 | 60.210 | +| ResNet200_vd | 0.809 | 0.953 | | | 30.530 | 74.740 | +| ResNet50_vd_ssld | 0.830 | 0.964 | | | 8.670 | 25.580 | +| Fix_ResNet50_vd_ssld | 0.840 | 0.970 | | | 17.696 | 25.580 | +| ResNet101_vd_ssld | 0.837 | 0.967 | | | 16.100 | 44.570 | + +**备注:** `Fix_ResNet50_vd_ssld` 是固定 `ResNet50_vd_ssld` 除 FC 层外所有的网络参数,在 320x320 的图像输入分辨率下,基于 ImageNet-1k 数据集微调得到。 + + + + +## 1.3 Benchmark + + + +### 1.3.1 基于 V100 GPU 的预测速度 + +| Models | Size | Latency(ms)
bs=1 | Latency(ms)
bs=4 | Latency(ms)
bs=8 | +|:--:|:--:|:--:|:--:|:--:| +| ResNet18 | 224 | 1.22 | 2.19 | 3.63 | +| ResNet18_vd | 224 | 1.26 | 2.28 | 3.89 | +| ResNet34 | 224 | 1.97 | 3.25 | 5.70 | +| ResNet34_vd | 224 | 2.00 | 3.28 | 5.84 | +| ResNet34_vd_ssld | 224 | 2.00 | 3.26 | 5.85 | +| ResNet50 | 224 | 2.54 | 4.79 | 7.40 | +| ResNet50_vc | 224 | 2.57 | 4.83 | 7.52 | +| ResNet50_vd | 224 | 2.60 | 4.86 | 7.63 | +| ResNet101 | 224 | 4.37 | 8.18 | 12.38 | +| ResNet101_vd | 224 | 4.43 | 8.25 | 12.60 | +| ResNet152 | 224 | 6.05 | 11.41 | 17.33 | +| ResNet152_vd | 224 | 6.11 | 11.51 | 17.59 | +| ResNet200_vd | 224 | 7.70 | 14.57 | 22.16 | +| ResNet50_vd_ssld | 224 | 2.59 | 4.87 | 7.62 | +| ResNet101_vd_ssld | 224 | 4.43 | 8.25 | 12.58 | + +**备注:** 精度类型为 FP32,推理过程使用 TensorRT。 + + + +### 1.3.2 基于 T4 GPU 的预测速度 + +| Models | Size | Latency(ms)
FP16
bs=1 | Latency(ms)
FP16
bs=4 | Latency(ms)
FP16
bs=8 | Latency(ms)
FP32
bs=1 | Latency(ms)
FP32
bs=4 | Latency(ms)
FP32
bs=8 | +|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +| ResNet18 | 224 | 1.3568 | 2.5225 | 3.61904 | 1.45606 | 3.56305 | 6.28798 | +| ResNet18_vd | 224 | 1.39593 | 2.69063 | 3.88267 | 1.54557 | 3.85363 | 6.88121 | +| ResNet34 | 224 | 2.23092 | 4.10205 | 5.54904 | 2.34957 | 5.89821 | 10.73451 | +| ResNet34_vd | 224 | 2.23992 | 4.22246 | 5.79534 | 2.43427 | 6.22257 | 11.44906 | +| ResNet34_vd_ssld | 224 | 2.23992 | 4.22246 | 5.79534 | 2.43427 | 6.22257 | 11.44906 | +| ResNet50 | 224 | 2.63824 | 4.63802 | 7.02444 | 3.47712 | 7.84421 | 13.90633 | +| ResNet50_vc | 224 | 2.67064 | 4.72372 | 7.17204 | 3.52346 | 8.10725 | 14.45577 | +| ResNet50_vd | 224 | 2.65164 | 4.84109 | 7.46225 | 3.53131 | 8.09057 | 14.45965 | +| ResNet101 | 224 | 5.04037 | 7.73673 | 10.8936 | 6.07125 | 13.40573 | 24.3597 | +| ResNet101_vd | 224 | 5.05972 | 7.83685 | 11.34235 | 6.11704 | 13.76222 | 25.11071 | +| ResNet152 | 224 | 7.28665 | 10.62001 | 14.90317 | 8.50198 | 19.17073 | 35.78384 | +| ResNet152_vd | 224 | 7.29127 | 10.86137 | 15.32444 | 8.54376 | 19.52157 | 36.64445 | +| ResNet200_vd | 224 | 9.36026 | 13.5474 | 19.0725 | 10.80619 | 25.01731 | 48.81399 | +| ResNet50_vd_ssld | 224 | 2.65164 | 4.84109 | 7.46225 | 3.53131 | 8.09057 | 14.45965 | +| Fix_ResNet50_vd_ssld | 320 | 3.42818 | 7.51534 | 13.19370 | 5.07696 | 14.64218 | 27.01453 | +| ResNet101_vd_ssld | 224 | 5.05972 | 7.83685 | 11.34235 | 6.11704 | 13.76222 | 25.11071 | + +**备注:** 推理过程使用 TensorRT。 + + + +## 2. 模型快速体验 + + + +### 2.1 安装 paddlepaddle + +- 您的机器安装的是 CUDA9 或 CUDA10,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple +``` + +- 您的机器是CPU,请运行以下命令安装 + +```bash +python3 -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +更多的版本需求,请参照[飞桨官网安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + + +### 2.2 安装 paddleclas + +使用如下命令快速安装 paddleclas + +``` +pip3 install paddleclas +``` + + +### 2.3 预测 + +* 在命令行中使用 ResNet50 的权重快速预测 + +```bash +paddleclas --model_name=ResNet50 --infer_imgs="docs/images/inference_deployment/whl_demo.jpg" +``` + +结果如下: +``` +>>> result +class_ids: [8, 7, 86, 82, 80], scores: [0.97968, 0.02028, 3e-05, 1e-05, 0.0], label_names: ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'black grouse'], filename: docs/images/inference_deployment/whl_demo.jpg +Predict complete! +``` + +**备注**: 更换 ResNet 的其他 scale 的模型时,只需替换 `model_name`,如将此时的模型改为 `ResNet18` 时,只需要将 `--model_name=ResNet50` 改为 `--model_name=ResNet18` 即可。 + + +* 在 Python 代码中预测 +```python +from paddleclas import PaddleClas +clas = PaddleClas(model_name='ResNet50') +infer_imgs = 'docs/images/inference_deployment/whl_demo.jpg' +result = clas.predict(infer_imgs) +print(next(result)) +``` + +**备注**:`PaddleClas.predict()` 为可迭代对象(`generator`),因此需要使用 `next()` 函数或 `for` 循环对其迭 +代调用。每次调用将以 `batch_size` 为单位进行一次预测,并返回预测结果。返回结果示例如下: + +``` +>>> result +[{'class_ids': [8, 7, 86, 82, 80], 'scores': [0.97968, 0.02028, 3e-05, 1e-05, 0.0], 'label_names': ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'black grouse'], 'filename': 'docs/images/inference_deployment/whl_demo.jpg'}] +``` + + + + +## 3. 
模型训练、评估和预测 + + + +### 3.1 环境配置 + +* 安装:请先参考 [Paddle 安装教程](../installation/install_paddle.md) 以及 [PaddleClas 安装教程](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 + + + +### 3.2 数据准备 + +请在[ImageNet 官网](https://www.image-net.org/)准备 ImageNet-1k 相关的数据。 + + +进入 PaddleClas 目录。 + +``` +cd path_to_PaddleClas +``` + +进入 `dataset/` 目录,将下载好的数据命名为 `ILSVRC2012` ,存放于此。 `ILSVRC2012` 目录中具有以下数据: + +``` +├── train +│   ├── n01440764 +│   │   ├── n01440764_10026.JPEG +│   │   ├── n01440764_10027.JPEG +├── train_list.txt +... +├── val +│   ├── ILSVRC2012_val_00000001.JPEG +│   ├── ILSVRC2012_val_00000002.JPEG +├── val_list.txt +``` + +其中 `train/` 和 `val/` 分别为训练集和验证集。`train_list.txt` 和 `val_list.txt` 分别为训练集和验证集的标签文件。 + +**备注:** + +* 关于 `train_list.txt`、`val_list.txt`的格式说明,可以参考[PaddleClas分类数据集格式说明](../data_preparation/classification_dataset.md#1-数据集格式说明) 。 + + + + +### 3.3 模型训练 + + +在 `ppcls/configs/ImageNet/ResNet/ResNet50.yaml` 中提供了 ResNet50 训练配置,可以通过如下脚本启动训练: + +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python3 -m paddle.distributed.launch \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +``` + + +**备注:** + +* 当前精度最佳的模型会保存在 `output/ResNet50/best_model.pdparams` + + + +### 3.4 模型评估 + +训练好模型之后,可以通过以下命令实现对模型指标的评估。 + +```bash +python3 tools/eval.py \ + -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml \ + -o Global.pretrained_model=output/ResNet50/best_model +``` + +其中 `-o Global.pretrained_model="output/ResNet50/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + + + +### 3.5 模型预测 + +模型训练完成之后,可以加载训练得到的预训练模型,进行模型预测。在模型库的 `tools/infer.py` 中提供了完整的示例,只需执行下述命令即可完成模型预测: + +```python +python3 tools/infer.py \ + -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml \ + -o Global.pretrained_model=output/ResNet50/best_model +``` + +输出结果如下: + +``` +[{'class_ids': [8, 7, 86, 82, 80], 'scores': [0.97968, 0.02028, 3e-05, 1e-05, 0.0], 'file_name': 'docs/images/inference_deployment/whl_demo.jpg', 'label_names': ['hen', 'cock', 'partridge', 'ruffed grouse, partridge, Bonasa umbellus', 'black grouse']}] +``` + +**备注:** + +* 这里`-o Global.pretrained_model="output/ResNet50/best_model"` 指定了当前最佳权重所在的路径,如果指定其他权重,只需替换对应的路径即可。 + +* 默认是对 `docs/images/inference_deployment/whl_demo.jpg` 进行预测,此处也可以通过增加字段 `-o Infer.infer_imgs=xxx` 对其他图片预测。 + +* 默认输出的是 Top-5 的值,如果希望输出 Top-k 的值,可以指定`-o Infer.PostProcess.topk=k`,其中,`k` 为您指定的值。 + + + + + +## 4. 
模型推理部署 + + + +### 4.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +当使用 Paddle Inference 推理时,加载的模型类型为 inference 模型。本案例提供了两种获得 inference 模型的方法,如果希望得到和文档相同的结果,请选择[直接下载 inference 模型](#6.1.2)的方式。 + + + + +### 4.1.1 基于训练得到的权重导出 inference 模型 + +此处,我们提供了将权重和模型转换的脚本,执行该脚本可以得到对应的 inference 模型: + +```bash +python3 tools/export_model.py \ + -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml \ + -o Global.pretrained_model=output/ResNet50/best_model \ + -o Global.save_inference_dir=deploy/models/ResNet50_infer +``` +执行完该脚本后会在 `deploy/models/` 下生成 `ResNet50_infer` 文件夹,`models` 文件夹下应有如下文件结构: + +``` +├── ResNet50_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + + +### 4.1.2 直接下载 inference 模型 + +[4.1.1 小节](#4.1.1)提供了导出 inference 模型的方法,此处也提供了该场景可以下载的 inference 模型,可以直接下载体验。 + +``` +cd deploy/models +# 下载 inference 模型并解压 +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar && tar -xf ResNet50_infer.tar +``` + +解压完毕后,`models` 文件夹下应有如下文件结构: + +``` +├── ResNet50_infer +│ ├── inference.pdiparams +│ ├── inference.pdiparams.info +│ └── inference.pdmodel +``` + + + +### 4.2 基于 Python 预测引擎推理 + + + + +#### 4.2.1 预测单张图像 + +返回 `deploy` 目录: + +``` +cd ../ +``` + +运行下面的命令,对图像 `./images/ImageNet/ILSVRC2012_val_00000010.jpeg` 进行分类。 + +```shell +# 使用下面的命令使用 GPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/ResNet50_infer +# 使用下面的命令使用 CPU 进行预测 +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/ResNet50_infer -o Global.use_gpu=False +``` + +输出结果如下。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [153, 332, 229, 204, 265], score(s): [0.41, 0.39, 0.05, 0.04, 0.04], label_name(s): ['Maltese dog, Maltese terrier, Maltese', 'Angora, Angora rabbit', 'Old English sheepdog, bobtail', 'Lhasa, Lhasa apso', 'toy poodle'] +``` + + + +#### 4.2.2 基于文件夹的批量预测 + +如果希望预测文件夹内的图像,可以直接修改配置文件中的 `Global.infer_imgs` 字段,也可以通过下面的 `-o` 参数修改对应的配置。 + +```shell +# 使用下面的命令使用 GPU 进行预测,如果希望使用 CPU 预测,可以在命令后面添加 -o Global.use_gpu=False +python3 python/predict_cls.py -c configs/inference_cls.yaml -o Global.inference_model_dir=models/ResNet50_infer -o Global.infer_imgs=images/ImageNet/ +``` + +终端中会输出该文件夹内所有图像的分类结果,如下所示。 + +``` +ILSVRC2012_val_00000010.jpeg: class id(s): [153, 332, 229, 204, 265], score(s): [0.41, 0.39, 0.05, 0.04, 0.04], label_name(s): ['Maltese dog, Maltese terrier, Maltese', 'Angora, Angora rabbit', 'Old English sheepdog, bobtail', 'Lhasa, Lhasa apso', 'toy poodle'] +ILSVRC2012_val_00010010.jpeg: class id(s): [902, 626, 531, 487, 761], score(s): [0.47, 0.10, 0.05, 0.04, 0.03], label_name(s): ['whistle', 'lighter, light, igniter, ignitor', 'digital watch', 'cellular telephone, cellular phone, cellphone, cell, mobile phone', 'remote control, remote'] +ILSVRC2012_val_00020010.jpeg: class id(s): [178, 211, 246, 236, 210], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['Weimaraner', 'vizsla, Hungarian pointer', 'Great Dane', 'Doberman, Doberman pinscher', 'German short-haired pointer'] +ILSVRC2012_val_00030010.jpeg: class id(s): [80, 23, 83, 93, 136], score(s): [1.00, 0.00, 0.00, 0.00, 0.00], label_name(s): ['black grouse', 'vulture', 'prairie chicken, prairie grouse, prairie fowl', 'hornbill', 'European 
gallinule, Porphyrio porphyrio'] +``` + + + + +### 4.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 4.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 4.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 4.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](@shuilong)来完成相应的部署工作。 diff --git a/docs/zh_CN/models/SwinTransformer.md b/docs/zh_CN/models/SwinTransformer.md index 40a873274312f0fe3925bb57a8141183dc91562f..df29b0a0c99754196bd3871536013b4f67aa2447 100644 --- a/docs/zh_CN/models/SwinTransformer.md +++ b/docs/zh_CN/models/SwinTransformer.md @@ -1,21 +1,37 @@ # SwinTransformer ---- + +----- ## 目录 -* [1. 概述](#1) -* [2. 精度、FLOPS 和参数量](#2) -* [3. 基于V100 GPU 的预测速度](#3) +- [1. 模型介绍](#1) + - [1.1 模型简介](#1.1) + - [1.2 模型指标](#1.2) + - [1.3 Benchmark](#1.3) + - [1.3.1 基于 V100 GPU 的预测速度](#1.3.1) +- [2. 模型快速体验](#2) +- [3. 模型训练、评估和预测](#3) +- [4. 模型推理部署](#4) + - [4.1 推理模型准备](#4.1) + - [4.2 基于 Python 预测引擎推理](#4.2) + - [4.3 基于 C++ 预测引擎推理](#4.3) + - [4.4 服务化部署](#4.4) + - [4.5 端侧部署](#4.5) + - [4.6 Paddle2ONNX 模型转换与预测](#4.6) + -## 1. 概述 +## 1. 模型介绍 + +### 1.1 模型简介 + Swin Transformer 是一种新的视觉 Transformer 网络,可以用作计算机视觉领域的通用骨干网路。SwinTransformer 由移动窗口(shifted windows)表示的层次 Transformer 结构组成。移动窗口将自注意计算限制在非重叠的局部窗口上,同时允许跨窗口连接,从而提高了网络性能。[论文地址](https://arxiv.org/abs/2103.14030)。 -## 2. 精度、FLOPS 和参数量 +### 1.2 模型指标 -| Models | Top1 | Top5 | Reference
top1 | Reference
top5 | FLOPS
(G) | Params
(M) | +| Models | Top1 | Top5 | Reference
top1 | Reference
top5 | FLOPs
(G) | Params
(M) | |:--:|:--:|:--:|:--:|:--:|:--:|:--:| | SwinTransformer_tiny_patch4_window7_224 | 0.8069 | 0.9534 | 0.812 | 0.955 | 4.5 | 28 | | SwinTransformer_small_patch4_window7_224 | 0.8275 | 0.9613 | 0.832 | 0.962 | 8.7 | 50 | @@ -32,17 +48,87 @@ Swin Transformer 是一种新的视觉 Transformer 网络,可以用作计算 -## 3. 基于 V100 GPU 的预测速度 +### 1.3 Benchmark + +#### 1.3.1 基于 V100 GPU 的预测速度 -| Models | Crop Size | Resize Short Size | FP32
Batch Size=1
(ms) | FP32
Batch Size=4
(ms) | FP32
Batch Size=8
(ms) | -| ------------------------------------------------------- | --------- | ----------------- | ------------------------------ | ------------------------------ | ------------------------------ | -| SwinTransformer_tiny_patch4_window7_224 | 224 | 256 | 6.59 | 9.68 | 16.32 | -| SwinTransformer_small_patch4_window7_224 | 224 | 256 | 12.54 | 17.07 | 28.08 | -| SwinTransformer_base_patch4_window7_224 | 224 | 256 | 13.37 | 23.53 | 39.11 | -| SwinTransformer_base_patch4_window12_384 | 384 | 384 | 19.52 | 64.56 | 123.30 | -| SwinTransformer_base_patch4_window7_224[1] | 224 | 256 | 13.53 | 23.46 | 39.13 | -| SwinTransformer_base_patch4_window12_384[1] | 384 | 384 | 19.65 | 64.72 | 123.42 | -| SwinTransformer_large_patch4_window7_224[1] | 224 | 256 | 15.74 | 38.57 | 71.49 | -| SwinTransformer_large_patch4_window12_384[1] | 384 | 384 | 32.61 | 116.59 | 223.23 | +| Models | Size | Latency(ms)
bs=1 | Latency(ms)
bs=4 | Latency(ms)
bs=8 | +|:--:|:--:|:--:|:--:|:--:| +| SwinTransformer_tiny_patch4_window7_224 | 224 | 6.59 | 9.68 | 16.32 | +| SwinTransformer_small_patch4_window7_224 | 224 | 12.54 | 17.07 | 28.08 | +| SwinTransformer_base_patch4_window7_224 | 224 | 13.37 | 23.53 | 39.11 | +| SwinTransformer_base_patch4_window12_384 | 384 | 19.52 | 64.56 | 123.30 | +| SwinTransformer_base_patch4_window7_224[1] | 224 | 13.53 | 23.46 | 39.13 | +| SwinTransformer_base_patch4_window12_384[1] | 384 | 19.65 | 64.72 | 123.42 | +| SwinTransformer_large_patch4_window7_224[1] | 224 | 15.74 | 38.57 | 71.49 | +| SwinTransformer_large_patch4_window12_384[1] | 384 | 32.61 | 116.59 | 223.23 | [1]:基于 ImageNet22k 数据集预训练,然后在 ImageNet1k 数据集迁移学习得到。 + +**备注:** 精度类型为 FP32,推理过程使用 TensorRT。 + + + + +## 2. 模型快速体验 + +安装 paddlepaddle 和 paddleclas 即可快速对图片进行预测,体验方法可以参考[ResNet50 模型快速体验](./ResNet.md#2-模型快速体验)。 + + + +## 3. 模型训练、评估和预测 + + +此部分内容包括训练环境配置、ImageNet数据的准备、SwinTransformer 在 ImageNet 上的训练、评估、预测等内容。在 `ppcls/configs/ImageNet/SwinTransformer/` 中提供了 SwinTransformer 的训练配置,可以通过如下脚本启动训练:此部分内容可以参考[ResNet50 模型训练、评估和预测](./ResNet.md#3-模型训练评估和预测)。 + +**备注:** 由于 SwinTransformer 系列模型默认使用的 GPU 数量为 8 个,所以在训练时,需要指定8个GPU,如`python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" tools/train.py -c xxx.yaml`, 如果使用 4 个 GPU 训练,默认学习率需要减小一半,精度可能有损。 + + + + +## 4. 模型推理部署 + + + +### 4.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用 MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +Inference 的获取可以参考 [ResNet50 推理模型准备](./ResNet.md#41-推理模型准备) 。 + + + +### 4.2 基于 Python 预测引擎推理 + +PaddleClas 提供了基于 python 预测引擎推理的示例。您可以参考[ResNet50 基于 Python 预测引擎推理](./ResNet.md#42-基于-python-预测引擎推理) 对 SwinTransformer 完成推理预测。 + + + +### 4.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../inference_deployment/cpp_deploy.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../inference_deployment/cpp_deploy_on_windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 4.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../inference_deployment/paddle_serving_deploy.md)来完成相应的部署工作。 + + + +### 4.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../inference_deployment/paddle_lite_deploy.md)来完成相应的部署工作。 + + + +### 4.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](@shuilong)来完成相应的部署工作。 + diff --git a/docs/zh_CN/models_training/distributed_training.md b/docs/zh_CN/models_training/distributed_training.md new file mode 100644 index 0000000000000000000000000000000000000000..59532a5ed3d0c9f676ecb733b42b02052eb79752 --- /dev/null +++ b/docs/zh_CN/models_training/distributed_training.md @@ -0,0 +1,62 @@ + +# 分布式训练 + +## 1. 
简介 + +* 分布式训练指的是将训练任务按照一定方法拆分到多个计算节点进行计算,再按照一定的方法对拆分后计算得到的梯度等信息进行聚合与更新。飞桨分布式训练技术源自百度的业务实践,在自然语言处理、计算机视觉、搜索和推荐等领域经过超大规模业务检验。分布式训练的高性能,是飞桨的核心优势技术之一,在图像分类等任务上,分布式训练可以达到几乎线性的加速比。图像分类训练任务中往往包含大量训练数据,以ImageNet为例,ImageNet22k数据集中包含1400W张图像,如果使用单卡训练,会非常耗时。因此PaddleClas中使用分布式训练接口完成训练任务,同时支持单机训练与多机训练。更多关于分布式训练的方法与文档可以参考:[分布式训练快速开始教程](https://fleet-x.readthedocs.io/en/latest/paddle_fleet_rst/parameter_server/ps_quick_start.html)。 + +## 2. 使用方法 + +### 2.1 单机训练 + +* 以识别为例,本地准备好数据之后,使用`paddle.distributed.launch`的接口启动训练任务即可。下面为运行代码示例。 + +```shell +python3 -m paddle.distributed.launch \ + --log_dir=./log/ \ + --gpus "0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml +``` + +### 2.2 多机训练 + +* 相比单机训练,多机训练时,只需要添加`--ips`的参数,该参数表示需要参与分布式训练的机器的ip列表,不同机器的ip用逗号隔开。下面为运行代码示例。 + +```shell +ip_list="192.168.0.1,192.168.0.2" +python3 -m paddle.distributed.launch \ + --log_dir=./log/ \ + --ips="${ip_list}" \ + --gpus="0,1,2,3" \ + tools/train.py \ + -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml +``` + +**注:** +* 不同机器的ip信息需要用逗号隔开,可以通过`ifconfig`或者`ipconfig`查看。 +* 不同机器之间需要做免密设置,且可以直接ping通,否则无法完成通信。 +* 不同机器之间的代码、数据与运行命令或脚本需要保持一致,且所有的机器上都需要运行设置好的训练命令或者脚本。最终`ip_list`中的第一台机器的第一块设备是trainer0,以此类推。 +* 不同机器的起始端口可能不同,建议在启动多机任务前,在不同的机器中设置相同的多机运行起始端口,命令为`export FLAGS_START_PORT=17000`,端口值建议在`10000~20000`之间。 + + +## 3. 性能效果测试 + +* 在单机8卡V100的机器上,基于[SSLD知识蒸馏训练策略](../advanced_tutorials/ssld.md)(数据量500W)进行模型训练,不同模型的训练耗时以及单机8卡加速比情况如下所示。 + + +| 模型 | 精度 | 单机单卡耗时 | 单机8卡耗时 | 加速比 | +|:---------:|:--------:|:--------:|:--------:|:------:| +| PPHGNet-base_ssld | 85.00% | 133.2d | 18.96d | **7.04** | +| PPLCNetv2-base_ssld | 80.10% | 31.6d | 6.4d | **4.93** | +| PPLCNet_x0_25_ssld | 53.43% | 21.8d | 6.2d | **3.99** | + + +* 在4机8卡V100的机器上,基于[SSLD知识蒸馏训练策略](../advanced_tutorials/ssld.md)(数据量500W)进行模型训练,不同模型的训练耗时以及多机加速比情况如下所示。 + + +| 模型 | 精度 | 单机8卡耗时 | 4机8卡耗时 | 加速比 | +|:---------:|:--------:|:--------:|:--------:|:------:| +| PPHGNet-base_ssld | 85.00% | 18.96d | 4.86d | **3.90** | +| PPLCNetv2-base_ssld | 80.10% | 6.4d | 1.67d | **3.83** | +| PPLCNet_x0_25_ssld | 53.43% | 6.2d | 1.78d | **3.48** | diff --git a/docs/zh_CN/quick_start/quick_start_classification_new_user.md b/docs/zh_CN/quick_start/quick_start_classification_new_user.md index 905f62d4dfc68a2bea61c87e7ef3867051d891fc..fdc61193c88b4b8b522842c7685bcdcf315dc4b5 100644 --- a/docs/zh_CN/quick_start/quick_start_classification_new_user.md +++ b/docs/zh_CN/quick_start/quick_start_classification_new_user.md @@ -48,7 +48,7 @@ ## 2. 
环境安装与配置 -具体安装步骤可详看[Paddle 安装文档](../installation/install_paddle.md),[PaddleClas 安装文档](../installation/install_paddleclas.md)。 +具体安装步骤可详看[环境准备](../installation/install_paddleclas.md)。 diff --git a/docs/zh_CN/quick_start/quick_start_multilabel_classification.md b/docs/zh_CN/quick_start/quick_start_multilabel_classification.md index 888a61582078c009865317a4cb1b067264aa4082..ea6e691c1ef51fb1371a5ff747c4cfc4fe72a79d 100644 --- a/docs/zh_CN/quick_start/quick_start_multilabel_classification.md +++ b/docs/zh_CN/quick_start/quick_start_multilabel_classification.md @@ -1,6 +1,6 @@ # 多标签分类 quick start -基于 [NUS-WIDE-SCENE](https://lms.comp.nus.edu.sg/wp-content/uploads/2019/research/nuswide/NUS-WIDE.html) 数据集,体验多标签分类的训练、评估、预测的过程,该数据集是 NUS-WIDE 数据集的一个子集。请首先安装 PaddlePaddle 和 PaddleClas,具体安装步骤可详看 [Paddle 安装文档](../installation/install_paddle.md),[PaddleClas 安装文档](../installation/install_paddleclas.md)。 +基于 [NUS-WIDE-SCENE](https://lms.comp.nus.edu.sg/wp-content/uploads/2019/research/nuswide/NUS-WIDE.html) 数据集,体验多标签分类的训练、评估、预测的过程,该数据集是 NUS-WIDE 数据集的一个子集。请首先安装 PaddlePaddle 和 PaddleClas,具体安装步骤可详看 [环境准备](../installation/install_paddleclas.md)。 ## 目录 diff --git a/docs/zh_CN/quick_start/quick_start_recognition.md b/docs/zh_CN/quick_start/quick_start_recognition.md index e2e6b169ea0101239b33612a379fc17207e7ffd3..38803ec9be510d3a4a96117fce3a1ccf537d3af9 100644 --- a/docs/zh_CN/quick_start/quick_start_recognition.md +++ b/docs/zh_CN/quick_start/quick_start_recognition.md @@ -22,7 +22,7 @@ ## 1. 环境配置 -* 安装:请先参考 [Paddle 安装教程](../installation/install_paddle.md) 以及 [PaddleClas 安装教程](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 +* 安装:请先参考文档 [环境准备](../installation/install_paddleclas.md) 配置 PaddleClas 运行环境。 * 进入 `deploy` 运行目录。本部分所有内容与命令均需要在 `deploy` 目录下运行,可以通过下面的命令进入 `deploy` 目录。 diff --git a/docs/zh_CN/samples/Personnel_Access/README.md b/docs/zh_CN/samples/Personnel_Access/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d63c67afea59430cc627458d6f35fd95e2ac59d1 --- /dev/null +++ b/docs/zh_CN/samples/Personnel_Access/README.md @@ -0,0 +1,16 @@ +## 人员出入管理 + +近几年,AI视觉技术在安防、工业制造等场景在产业智能化升级进程中发挥着举足轻重的作用。【进出管控】作为各行业中的关键场景,应用需求十分迫切。 如在居家防盗、机房管控以及景区危险告警等场景中,存在大量对异常目标(人、车或其他物体)不经允许擅自进入规定区域的及时检测需求。利用深度学习视觉技术,可以及时准确地对闯入行为进行识别并发出告警信息。切实保障人员的生命财产安全。相比传统人力监管的方式,不仅可以实现7*24小时不间断的全方位保护,还能极大地降低管理成本,解放劳动力。 + +但在真实产业中,要实现高精度的人员进出识别不是一件容易的事,在实际场景中存在着各种各样的问题: + +**摄像头采集到的图像会受到建筑、机器、车辆等遮挡的影响** + +**天气多种多样,要适应白天、黑夜、雾天和雨天等** + +针对上述场景,本次飞桨产业实践范例库推出了重点区域人员进出管控实践示例,提供从数据准备、技术方案、模型训练优化,到模型部署的全流程可复用方案,有效解决了不同光照、不同天气等室外复杂环境下的图像分类问题,并且极大地降低了数据标注和算力成本,适用于厂区巡检、家居防盗、景区管理等多个产业应用。 + + +![result](./imgs/someone.gif) + +**注**: AI Studio在线运行代码请参考[人员出入管理](https://aistudio.baidu.com/aistudio/projectdetail/4094475) diff --git a/docs/zh_CN/samples/Personnel_Access/imgs/someone.gif b/docs/zh_CN/samples/Personnel_Access/imgs/someone.gif new file mode 100644 index 0000000000000000000000000000000000000000..1f5d684e5455971a636f70540216366166d8d9f8 Binary files /dev/null and b/docs/zh_CN/samples/Personnel_Access/imgs/someone.gif differ diff --git a/paddleclas.py b/paddleclas.py index bfad1931bdec5c305000775a6af891f4d7295244..3b45ca120aebad27ea268a0db3edae72c840d705 100644 --- a/paddleclas.py +++ b/paddleclas.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -24,7 +24,6 @@ import shutil import textwrap import tarfile import requests -import warnings from functools import partial from difflib import SequenceMatcher @@ -32,24 +31,25 @@ import cv2 import numpy as np from tqdm import tqdm from prettytable import PrettyTable +import paddle from deploy.python.predict_cls import ClsPredictor from deploy.utils.get_image_list import get_image_list from deploy.utils import config -from ppcls.arch.backbone import * -from ppcls.utils.logger import init_logger +import ppcls.arch.backbone as backbone +from ppcls.utils import logger # for building model with loading pretrained weights from backbone -init_logger() +logger.init_logger() __all__ = ["PaddleClas"] BASE_DIR = os.path.expanduser("~/.paddleclas/") BASE_INFERENCE_MODEL_DIR = os.path.join(BASE_DIR, "inference_model") BASE_IMAGES_DIR = os.path.join(BASE_DIR, "images") -BASE_DOWNLOAD_URL = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar" -MODEL_SERIES = { +IMN_MODEL_BASE_DOWNLOAD_URL = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar" +IMN_MODEL_SERIES = { "AlexNet": ["AlexNet"], "DarkNet": ["DarkNet53"], "DeiT": [ @@ -100,10 +100,17 @@ MODEL_SERIES = { "MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25", "MobileNetV3_small_x1_0_ssld", "MobileNetV3_large_x1_0_ssld" ], + "PPHGNet": [ + "PPHGNet_tiny", + "PPHGNet_small", + "PPHGNet_tiny_ssld", + "PPHGNet_small_ssld", + ], "PPLCNet": [ "PPLCNet_x0_25", "PPLCNet_x0_35", "PPLCNet_x0_5", "PPLCNet_x0_75", "PPLCNet_x1_0", "PPLCNet_x1_5", "PPLCNet_x2_0", "PPLCNet_x2_5" ], + "PPLCNetV2": ["PPLCNetV2_base"], "RedNet": ["RedNet26", "RedNet38", "RedNet50", "RedNet101", "RedNet152"], "RegNet": ["RegNetX_4GF"], "Res2Net": [ @@ -168,6 +175,13 @@ MODEL_SERIES = { ] } +PULC_MODEL_BASE_DOWNLOAD_URL = "https://paddleclas.bj.bcebos.com/models/PULC/inference/{}_infer.tar" +PULC_MODELS = [ + "car_exists", "language_classification", "person_attribute", + "person_exists", "safety_helmet", "text_image_orientation", + "textline_orientation", "traffic_sign", "vehicle_attribute" +] + class ImageTypeError(Exception): """ImageTypeError. 
@@ -185,76 +199,69 @@ class InputModelError(Exception): super().__init__(message) -def init_config(model_name, - inference_model_dir, - use_gpu=True, - batch_size=1, - topk=5, - **kwargs): - imagenet1k_map_path = os.path.join( - os.path.abspath(__dir__), "ppcls/utils/imagenet1k_label_list.txt") - cfg = { - "Global": { - "infer_imgs": kwargs["infer_imgs"] - if "infer_imgs" in kwargs else False, - "model_name": model_name, - "inference_model_dir": inference_model_dir, - "batch_size": batch_size, - "use_gpu": use_gpu, - "enable_mkldnn": kwargs["enable_mkldnn"] - if "enable_mkldnn" in kwargs else False, - "cpu_num_threads": kwargs["cpu_num_threads"] - if "cpu_num_threads" in kwargs else 1, - "enable_benchmark": False, - "use_fp16": kwargs["use_fp16"] if "use_fp16" in kwargs else False, - "ir_optim": True, - "use_tensorrt": kwargs["use_tensorrt"] - if "use_tensorrt" in kwargs else False, - "gpu_mem": kwargs["gpu_mem"] if "gpu_mem" in kwargs else 8000, - "enable_profile": False - }, - "PreProcess": { - "transform_ops": [{ - "ResizeImage": { - "resize_short": kwargs["resize_short"] - if "resize_short" in kwargs else 256 - } - }, { - "CropImage": { - "size": kwargs["crop_size"] - if "crop_size" in kwargs else 224 - } - }, { - "NormalizeImage": { - "scale": 0.00392157, - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "order": '' - } - }, { - "ToCHWImage": None - }] - }, - "PostProcess": { - "main_indicator": "Topk", - "Topk": { - "topk": topk, - "class_id_map_file": imagenet1k_map_path - } - } - } - if "save_dir" in kwargs: - if kwargs["save_dir"] is not None: - cfg["PostProcess"]["SavePreLabel"] = { - "save_dir": kwargs["save_dir"] - } - if "class_id_map_file" in kwargs: - if kwargs["class_id_map_file"] is not None: - cfg["PostProcess"]["Topk"]["class_id_map_file"] = kwargs[ +def init_config(model_type, model_name, inference_model_dir, **kwargs): + + cfg_path = f"deploy/configs/PULC/{model_name}/inference_{model_name}.yaml" if model_type == "pulc" else "deploy/configs/inference_cls.yaml" + cfg_path = os.path.join(__dir__, cfg_path) + cfg = config.get_config(cfg_path, show=False) + + cfg.Global.inference_model_dir = inference_model_dir + + if "batch_size" in kwargs and kwargs["batch_size"]: + cfg.Global.batch_size = kwargs["batch_size"] + + if "use_gpu" in kwargs and kwargs["use_gpu"]: + cfg.Global.use_gpu = kwargs["use_gpu"] + if cfg.Global.use_gpu and not paddle.device.is_compiled_with_cuda(): + msg = "The current running environment does not support the use of GPU. CPU has been used instead." 
+            logger.warning(msg)
+            cfg.Global.use_gpu = False
+
+    if "infer_imgs" in kwargs and kwargs["infer_imgs"]:
+        cfg.Global.infer_imgs = kwargs["infer_imgs"]
+    if "enable_mkldnn" in kwargs and kwargs["enable_mkldnn"]:
+        cfg.Global.enable_mkldnn = kwargs["enable_mkldnn"]
+    if "cpu_num_threads" in kwargs and kwargs["cpu_num_threads"]:
+        cfg.Global.cpu_num_threads = kwargs["cpu_num_threads"]
+    if "use_fp16" in kwargs and kwargs["use_fp16"]:
+        cfg.Global.use_fp16 = kwargs["use_fp16"]
+    if "use_tensorrt" in kwargs and kwargs["use_tensorrt"]:
+        cfg.Global.use_tensorrt = kwargs["use_tensorrt"]
+    if "gpu_mem" in kwargs and kwargs["gpu_mem"]:
+        cfg.Global.gpu_mem = kwargs["gpu_mem"]
+    if "resize_short" in kwargs and kwargs["resize_short"]:
+        cfg.PreProcess.transform_ops[0]["ResizeImage"][
+            "resize_short"] = kwargs["resize_short"]
+    if "crop_size" in kwargs and kwargs["crop_size"]:
+        cfg.PreProcess.transform_ops[1]["CropImage"]["size"] = kwargs[
+            "crop_size"]
+
+    # TODO(gaotingquan): not robust
+    if "thresh" in kwargs and kwargs[
+            "thresh"] and "ThreshOutput" in cfg.PostProcess:
+        cfg.PostProcess.ThreshOutput.thresh = kwargs["thresh"]
+    if "Topk" in cfg.PostProcess:
+        if "topk" in kwargs and kwargs["topk"]:
+            cfg.PostProcess.Topk.topk = kwargs["topk"]
+        if "class_id_map_file" in kwargs and kwargs["class_id_map_file"]:
+            cfg.PostProcess.Topk.class_id_map_file = kwargs[
                "class_id_map_file"]
+        else:
+            class_id_map_file_path = os.path.relpath(
+                cfg.PostProcess.Topk.class_id_map_file, "../")
+            cfg.PostProcess.Topk.class_id_map_file = os.path.join(
+                __dir__, class_id_map_file_path)
+    if "VehicleAttribute" in cfg.PostProcess:
+        if "color_threshold" in kwargs and kwargs["color_threshold"]:
+            cfg.PostProcess.VehicleAttribute.color_threshold = kwargs[
+                "color_threshold"]
+        if "type_threshold" in kwargs and kwargs["type_threshold"]:
+            cfg.PostProcess.VehicleAttribute.type_threshold = kwargs[
+                "type_threshold"]
+
+    if "save_dir" in kwargs and kwargs["save_dir"]:
+        cfg.PostProcess.SavePreLabel.save_dir = kwargs["save_dir"]
 
-    cfg = config.AttrDict(cfg)
-    config.create_attr_dict(cfg)
     return cfg
 
 
@@ -275,40 +282,48 @@ def args_cfg():
         type=str,
         help="The directory of model files. Valid when model_name not specified."
    )
+    parser.add_argument(
+        "--use_gpu", type=str2bool, help="Whether to use GPU.")
     parser.add_argument(
-        "--use_gpu", type=str, default=True, help="Whether use GPU.")
-    parser.add_argument("--gpu_mem", type=int, default=8000, help="")
+        "--gpu_mem",
+        type=int,
+        help="The GPU memory size (MB) allocated for prediction.")
     parser.add_argument(
         "--enable_mkldnn",
         type=str2bool,
-        default=False,
         help="Whether to use MKLDNN. Valid when use_gpu is False")
-    parser.add_argument("--cpu_num_threads", type=int, default=1, help="")
     parser.add_argument(
-        "--use_tensorrt", type=str2bool, default=False, help="")
-    parser.add_argument("--use_fp16", type=str2bool, default=False, help="")
+        "--cpu_num_threads",
+        type=int,
+        help="The number of threads used when predicting on CPU.")
+    parser.add_argument(
+        "--use_tensorrt",
+        type=str2bool,
+        help="Whether to use TensorRT to accelerate inference.")
     parser.add_argument(
-        "--batch_size", type=int, default=1, help="Batch size. Default by 1.")
+        "--use_fp16", type=str2bool, help="Whether to use FP16 when predicting.")
+    parser.add_argument("--batch_size", type=int, help="Batch size.")
     parser.add_argument(
         "--topk",
        type=int,
-        default=5,
-        help="Return topk score(s) and corresponding results. Default by 5.")
+        help="Return the topk score(s) and corresponding results when the Topk postprocess is used."
+    )
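+    # The threshold-style flags below only take effect when the matching
+    # postprocess (Topk / ThreshOutput / VehicleAttribute) is present in the
+    # model's inference config; see init_config() above.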
     parser.add_argument(
         "--class_id_map_file",
         type=str,
         help="The path of the file that maps class_id to label.")
+    parser.add_argument(
+        "--thresh",
+        type=float,
+        help="The threshold when the ThreshOutput postprocess is used.")
+    parser.add_argument(
+        "--color_threshold",
+        type=float,
+        help="The threshold of the color attribute when the VehicleAttribute postprocess is used.")
+    parser.add_argument(
+        "--type_threshold",
+        type=float,
+        help="The threshold of the vehicle type attribute when the VehicleAttribute postprocess is used.")
     parser.add_argument(
         "--save_dir",
         type=str,
         help="The directory to save prediction results as pre-label.")
     parser.add_argument(
-        "--resize_short",
-        type=int,
-        default=256,
-        help="Resize according to short size.")
-    parser.add_argument(
-        "--crop_size", type=int, default=224, help="Centor crop size.")
+        "--resize_short", type=int, help="Resize according to the short side.")
+    parser.add_argument("--crop_size", type=int, help="Center crop size.")
 
     args = parser.parse_args()
     return vars(args)
 
 
@@ -317,33 +332,45 @@ def args_cfg():
 
 def print_info():
     """Print the list of supported models in formatted tables.
     """
-    table = PrettyTable(["Series", "Name"])
+    imn_table = PrettyTable(["IMN Model Series", "Model Name"])
+    pulc_table = PrettyTable(["PULC Models"])
     try:
         sz = os.get_terminal_size()
-        width = sz.columns - 30 if sz.columns > 50 else 10
+        total_width = sz.columns
+        first_width = 30
+        second_width = total_width - first_width if total_width > 50 else 10
     except OSError:
-        width = 100
-    for series in MODEL_SERIES:
-        names = textwrap.fill(" ".join(MODEL_SERIES[series]), width=width)
-        table.add_row([series, names])
-    width = len(str(table).split("\n")[0])
-    print("{}".format("-" * width))
-    print("Models supported by PaddleClas".center(width))
-    print(table)
-    print("Powered by PaddlePaddle!".rjust(width))
-    print("{}".format("-" * width))
-
-
-def get_model_names():
+        total_width = 100
+        second_width = 100
+    for series in IMN_MODEL_SERIES:
+        names = textwrap.fill(
+            " ".join(IMN_MODEL_SERIES[series]), width=second_width)
+        imn_table.add_row([series, names])
+
+    table_width = len(str(imn_table).split("\n")[0])
+    pulc_table.add_row([
+        textwrap.fill(
+            " ".join(PULC_MODELS), width=total_width).center(table_width - 4)
+    ])
+
+    print("{}".format("-" * table_width))
+    print("Models supported by PaddleClas".center(table_width))
+    print(imn_table)
+    print(pulc_table)
+    print("Powered by PaddlePaddle!".rjust(table_width))
+    print("{}".format("-" * table_width))
+
+
+def get_imn_model_names():
     """Get the model names list.
     """
     model_names = []
-    for series in MODEL_SERIES:
-        model_names += (MODEL_SERIES[series])
+    for series in IMN_MODEL_SERIES:
+        model_names += (IMN_MODEL_SERIES[series])
     return model_names
 
 
-def similar_architectures(name="", names=[], thresh=0.1, topk=10):
+def similar_model_names(name="", names=[], thresh=0.1, topk=5):
     """Find the most similar topk model names.
     """
     scores = []
@@ -378,12 +405,17 @@ def download_with_progressbar(url, save_path):
             f"Something went wrong while downloading file from {url}")
 
 
-def check_model_file(model_name):
+def check_model_file(model_type, model_name):
     """Check that the model files exist; download and untar them if they do not.
     """
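+    # Downloaded files are cached under BASE_INFERENCE_MODEL_DIR, i.e.
+    # ~/.paddleclas/inference_model/PULC/<model_name>/ for PULC models and
+    # ~/.paddleclas/inference_model/IMN/<model_name>/ for ImageNet models.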
""" - storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR, - model_name) - url = BASE_DOWNLOAD_URL.format(model_name) + if model_type == "pulc": + storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR, + "PULC", model_name) + url = PULC_MODEL_BASE_DOWNLOAD_URL.format(model_name) + else: + storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR, + "IMN", model_name) + url = IMN_MODEL_BASE_DOWNLOAD_URL.format(model_name) tar_file_name_list = [ "inference.pdiparams", "inference.pdiparams.info", "inference.pdmodel" @@ -393,7 +425,7 @@ def check_model_file(model_name): if not os.path.exists(model_file_path) or not os.path.exists( params_file_path): tmp_path = storage_directory(url.split("/")[-1]) - print(f"download {url} to {tmp_path}") + logger.info(f"download {url} to {tmp_path}") os.makedirs(storage_directory(), exist_ok=True) download_with_progressbar(url, tmp_path) with tarfile.open(tmp_path, "r") as tarObj: @@ -421,14 +453,13 @@ class PaddleClas(object): """PaddleClas. """ - print_info() + if not os.environ.get('ppcls', False): + os.environ.setdefault('ppcls', 'True') + print_info() def __init__(self, model_name: str=None, inference_model_dir: str=None, - use_gpu: bool=True, - batch_size: int=1, - topk: int=5, **kwargs): """Init PaddleClas with config. @@ -440,9 +471,11 @@ class PaddleClas(object): topk (int, optional): Return the top k prediction results with the highest score. Defaults to 5. """ super().__init__() - self._config = init_config(model_name, inference_model_dir, use_gpu, - batch_size, topk, **kwargs) - self._check_input_model() + self.model_type, inference_model_dir = self._check_input_model( + model_name, inference_model_dir) + self._config = init_config(self.model_type, model_name, + inference_model_dir, **kwargs) + self.cls_predictor = ClsPredictor(self._config) def get_config(self): @@ -450,24 +483,29 @@ class PaddleClas(object): """ return self._config - def _check_input_model(self): + def _check_input_model(self, model_name, inference_model_dir): """Check input model name or model files. """ - candidate_model_names = get_model_names() - input_model_name = self._config.Global.get("model_name", None) - inference_model_dir = self._config.Global.get("inference_model_dir", - None) - if input_model_name is not None: - similar_names = similar_architectures(input_model_name, - candidate_model_names) - similar_names_str = ", ".join(similar_names) - if input_model_name not in candidate_model_names: - err = f"{input_model_name} is not provided by PaddleClas. \nMaybe you want: [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!" + all_imn_model_names = get_imn_model_names() + all_pulc_model_names = PULC_MODELS + + if model_name: + if model_name in all_imn_model_names: + inference_model_dir = check_model_file("imn", model_name) + return "imn", inference_model_dir + elif model_name in all_pulc_model_names: + inference_model_dir = check_model_file("pulc", model_name) + return "pulc", inference_model_dir + else: + similar_imn_names = similar_model_names(model_name, + all_imn_model_names) + similar_pulc_names = similar_model_names(model_name, + all_pulc_model_names) + similar_names_str = ", ".join(similar_imn_names + + similar_pulc_names) + err = f"{model_name} is not provided by PaddleClas. \nMaybe you want the : [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!" 
raise InputModelError(err) - self._config.Global.inference_model_dir = check_model_file( - input_model_name) - return - elif inference_model_dir is not None: + elif inference_model_dir: model_file_path = os.path.join(inference_model_dir, "inference.pdmodel") params_file_path = os.path.join(inference_model_dir, @@ -476,11 +514,11 @@ class PaddleClas(object): params_file_path): err = f"There is no model file or params file in this directory: {inference_model_dir}" raise InputModelError(err) - return + return "custom", inference_model_dir else: err = f"Please specify the model name supported by PaddleClas or directory contained model files(inference.pdmodel, inference.pdiparams)." raise InputModelError(err) - return + return None def predict(self, input_data: Union[str, np.array], print_pred: bool=False) -> Generator[list, None, None]: @@ -511,22 +549,21 @@ class PaddleClas(object): os.makedirs(image_storage_dir()) image_save_path = image_storage_dir("tmp.jpg") download_with_progressbar(input_data, image_save_path) - input_data = image_save_path - warnings.warn( + logger.info( f"Image to be predicted from Internet: {input_data}, has been saved to: {image_save_path}" ) + input_data = image_save_path image_list = get_image_list(input_data) batch_size = self._config.Global.get("batch_size", 1) - topk = self._config.PostProcess.Topk.get('topk', 1) img_list = [] img_path_list = [] cnt = 0 - for idx, img_path in enumerate(image_list): + for idx_img, img_path in enumerate(image_list): img = cv2.imread(img_path) if img is None: - warnings.warn( + logger.warning( f"Image file failed to read and has been skipped. The path: {img_path}" ) continue @@ -535,16 +572,15 @@ class PaddleClas(object): img_path_list.append(img_path) cnt += 1 - if cnt % batch_size == 0 or (idx + 1) == len(image_list): + if cnt % batch_size == 0 or (idx_img + 1) == len(image_list): preds = self.cls_predictor.predict(img_list) - if print_pred and preds: - for idx, pred in enumerate(preds): - pred_str = ", ".join( - [f"{k}: {pred[k]}" for k in pred]) - print( - f"filename: {img_path_list[idx]}, top-{topk}, {pred_str}" - ) + if preds: + for idx_pred, pred in enumerate(preds): + pred["filename"] = img_path_list[idx_pred] + if print_pred: + logger.info(", ".join( + [f"{k}: {pred[k]}" for k in pred])) img_list = [] img_path_list = [] @@ -564,7 +600,7 @@ def main(): res = clas_engine.predict(cfg["infer_imgs"], print_pred=True) for _ in res: pass - print("Predict complete!") + logger.info("Predict complete!") return diff --git a/ppcls/arch/__init__.py b/ppcls/arch/__init__.py index da21e101a27eb0db2c05b658346148bda3139c80..4021457961ad9013df79b05594e8424d1c312b10 100644 --- a/ppcls/arch/__init__.py +++ b/ppcls/arch/__init__.py @@ -32,14 +32,19 @@ from ppcls.arch.distill.afd_attention import LinearTransformStudent, LinearTrans __all__ = ["build_model", "RecModel", "DistillationModel", "AttentionModel"] -def build_model(config): +def build_model(config, mode="train"): arch_config = copy.deepcopy(config["Arch"]) model_type = arch_config.pop("name") + use_sync_bn = arch_config.pop("use_sync_bn", False) mod = importlib.import_module(__name__) arch = getattr(mod, model_type)(**arch_config) + if use_sync_bn: + arch = nn.SyncBatchNorm.convert_sync_batchnorm(arch) + if isinstance(arch, TheseusLayer): prune_model(config, arch) - quantize_model(config, arch) + quantize_model(config, arch, mode) + return arch @@ -50,6 +55,7 @@ def apply_to_static(config, model): specs = None if 'image_shape' in config['Global']: specs = [InputSpec([None] + 
config['Global']['image_shape'])] + specs[0].stop_gradient = True model = to_static(model, input_spec=specs) logger.info("Successfully to apply @to_static with specs: {}".format( specs)) diff --git a/ppcls/arch/backbone/__init__.py b/ppcls/arch/backbone/__init__.py index 9e434e1056933243071f92e829ab9198d66754e0..d3bb4541981fb4c01befc82b3b569a2e098ac92b 100644 --- a/ppcls/arch/backbone/__init__.py +++ b/ppcls/arch/backbone/__init__.py @@ -22,7 +22,9 @@ from ppcls.arch.backbone.legendary_models.vgg import VGG11, VGG13, VGG16, VGG19 from ppcls.arch.backbone.legendary_models.inception_v3 import InceptionV3 from ppcls.arch.backbone.legendary_models.hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W64_C from ppcls.arch.backbone.legendary_models.pp_lcnet import PPLCNet_x0_25, PPLCNet_x0_35, PPLCNet_x0_5, PPLCNet_x0_75, PPLCNet_x1_0, PPLCNet_x1_5, PPLCNet_x2_0, PPLCNet_x2_5 +from ppcls.arch.backbone.legendary_models.pp_lcnet_v2 import PPLCNetV2_base from ppcls.arch.backbone.legendary_models.esnet import ESNet_x0_25, ESNet_x0_5, ESNet_x0_75, ESNet_x1_0 +from ppcls.arch.backbone.legendary_models.pp_hgnet import PPHGNet_tiny, PPHGNet_small, PPHGNet_base from ppcls.arch.backbone.model_zoo.resnet_vc import ResNet50_vc from ppcls.arch.backbone.model_zoo.resnext import ResNeXt50_32x4d, ResNeXt50_64x4d, ResNeXt101_32x4d, ResNeXt101_64x4d, ResNeXt152_32x4d, ResNeXt152_64x4d @@ -50,7 +52,7 @@ from ppcls.arch.backbone.model_zoo.darknet import DarkNet53 from ppcls.arch.backbone.model_zoo.regnet import RegNetX_200MF, RegNetX_4GF, RegNetX_32GF, RegNetY_200MF, RegNetY_4GF, RegNetY_32GF from ppcls.arch.backbone.model_zoo.vision_transformer import ViT_small_patch16_224, ViT_base_patch16_224, ViT_base_patch16_384, ViT_base_patch32_384, ViT_large_patch16_224, ViT_large_patch16_384, ViT_large_patch32_384 from ppcls.arch.backbone.model_zoo.distilled_vision_transformer import DeiT_tiny_patch16_224, DeiT_small_patch16_224, DeiT_base_patch16_224, DeiT_tiny_distilled_patch16_224, DeiT_small_distilled_patch16_224, DeiT_base_distilled_patch16_224, DeiT_base_patch16_384, DeiT_base_distilled_patch16_384 -from ppcls.arch.backbone.model_zoo.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384 +from ppcls.arch.backbone.legendary_models.swin_transformer import SwinTransformer_tiny_patch4_window7_224, SwinTransformer_small_patch4_window7_224, SwinTransformer_base_patch4_window7_224, SwinTransformer_base_patch4_window12_384, SwinTransformer_large_patch4_window7_224, SwinTransformer_large_patch4_window12_384 from ppcls.arch.backbone.model_zoo.cswin_transformer import CSWinTransformer_tiny_224, CSWinTransformer_small_224, CSWinTransformer_base_224, CSWinTransformer_large_224, CSWinTransformer_base_384, CSWinTransformer_large_384 from ppcls.arch.backbone.model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L from ppcls.arch.backbone.model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0 @@ -66,9 +68,12 @@ from ppcls.arch.backbone.model_zoo.mobilevit import MobileViT_XXS, MobileViT_XS, from ppcls.arch.backbone.model_zoo.repvgg import RepVGG_A0, RepVGG_A1, RepVGG_A2, RepVGG_B0, RepVGG_B1, RepVGG_B2, RepVGG_B1g2, RepVGG_B1g4, RepVGG_B2g4, RepVGG_B3g4 from ppcls.arch.backbone.model_zoo.van import VAN_tiny from 
ppcls.arch.backbone.model_zoo.peleenet import PeleeNet +from ppcls.arch.backbone.model_zoo.convnext import ConvNeXt_tiny + from ppcls.arch.backbone.variant_models.resnet_variant import ResNet50_last_stage_stride1 from ppcls.arch.backbone.variant_models.vgg_variant import VGG19Sigmoid from ppcls.arch.backbone.variant_models.pp_lcnet_variant import PPLCNet_x2_5_Tanh +from ppcls.arch.backbone.model_zoo.adaface_ir_net import AdaFace_IR_18, AdaFace_IR_34, AdaFace_IR_50, AdaFace_IR_101, AdaFace_IR_152, AdaFace_IR_SE_50, AdaFace_IR_SE_101, AdaFace_IR_SE_152, AdaFace_IR_SE_200 # help whl get all the models' api (class type) and components' api (func type) diff --git a/ppcls/arch/backbone/legendary_models/mobilenet_v3.py b/ppcls/arch/backbone/legendary_models/mobilenet_v3.py index b7fc7e9f75db79338af9211782ff7a3c1525b222..3fbf9776bc4f39a5667b01623b7950d362203e9c 100644 --- a/ppcls/arch/backbone/legendary_models/mobilenet_v3.py +++ b/ppcls/arch/backbone/legendary_models/mobilenet_v3.py @@ -154,7 +154,8 @@ class MobileNetV3(TheseusLayer): class_expand=LAST_CONV, dropout_prob=0.2, return_patterns=None, - return_stages=None): + return_stages=None, + **kwargs): super().__init__() self.cfg = config diff --git a/ppcls/arch/backbone/legendary_models/pp_hgnet.py b/ppcls/arch/backbone/legendary_models/pp_hgnet.py new file mode 100644 index 0000000000000000000000000000000000000000..a5add431b025d9b97f0564a671a531d5ab7cd72d --- /dev/null +++ b/ppcls/arch/backbone/legendary_models/pp_hgnet.py @@ -0,0 +1,373 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import KaimingNormal, Constant +from paddle.nn import Conv2D, BatchNorm2D, ReLU, AdaptiveAvgPool2D, MaxPool2D +from paddle.regularizer import L2Decay +from paddle import ParamAttr + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "PPHGNet_tiny": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams", + "PPHGNet_small": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams", + "PPHGNet_base": "" +} + +__all__ = list(MODEL_URLS.keys()) + +kaiming_normal_ = KaimingNormal() +zeros_ = Constant(value=0.) +ones_ = Constant(value=1.) 
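+
+# Building blocks used below:
+# - ConvBNAct: Conv2D + BatchNorm2D with an optional ReLU; the BN parameters
+#   use L2Decay(0.0), so they are exempt from weight decay.
+# - ESEModule: a single-conv SE-style attention (global average pooling ->
+#   1x1 conv -> sigmoid) whose output rescales the input channel-wise.
+# - HG_Block: a stack of 3x3 ConvBNActs; the block input and all intermediate
+#   outputs are concatenated, aggregated by a 1x1 conv and refined by ESE.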
+ + +class ConvBNAct(TheseusLayer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + use_act=True): + super().__init__() + self.use_act = use_act + self.conv = Conv2D( + in_channels, + out_channels, + kernel_size, + stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias_attr=False) + self.bn = BatchNorm2D( + out_channels, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + if self.use_act: + self.act = ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.use_act: + x = self.act(x) + return x + + +class ESEModule(TheseusLayer): + def __init__(self, channels): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv = Conv2D( + in_channels=channels, + out_channels=channels, + kernel_size=1, + stride=1, + padding=0) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv(x) + x = self.sigmoid(x) + return paddle.multiply(x=identity, y=x) + + +class HG_Block(TheseusLayer): + def __init__( + self, + in_channels, + mid_channels, + out_channels, + layer_num, + identity=False, ): + super().__init__() + self.identity = identity + + self.layers = nn.LayerList() + self.layers.append( + ConvBNAct( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=3, + stride=1)) + for _ in range(layer_num - 1): + self.layers.append( + ConvBNAct( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=3, + stride=1)) + + # feature aggregation + total_channels = in_channels + layer_num * mid_channels + self.aggregation_conv = ConvBNAct( + in_channels=total_channels, + out_channels=out_channels, + kernel_size=1, + stride=1) + self.att = ESEModule(out_channels) + + def forward(self, x): + identity = x + output = [] + output.append(x) + for layer in self.layers: + x = layer(x) + output.append(x) + x = paddle.concat(output, axis=1) + x = self.aggregation_conv(x) + x = self.att(x) + if self.identity: + x += identity + return x + + +class HG_Stage(TheseusLayer): + def __init__(self, + in_channels, + mid_channels, + out_channels, + block_num, + layer_num, + downsample=True): + super().__init__() + self.downsample = downsample + if downsample: + self.downsample = ConvBNAct( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=2, + groups=in_channels, + use_act=False) + + blocks_list = [] + blocks_list.append( + HG_Block( + in_channels, + mid_channels, + out_channels, + layer_num, + identity=False)) + for _ in range(block_num - 1): + blocks_list.append( + HG_Block( + out_channels, + mid_channels, + out_channels, + layer_num, + identity=True)) + self.blocks = nn.Sequential(*blocks_list) + + def forward(self, x): + if self.downsample: + x = self.downsample(x) + x = self.blocks(x) + return x + + +class PPHGNet(TheseusLayer): + """ + PPHGNet + Args: + stem_channels: list. Stem channel list of PPHGNet. + stage_config: dict. The configuration of each stage of PPHGNet. such as the number of channels, stride, etc. + layer_num: int. Number of layers of HG_Block. + use_last_conv: boolean. Whether to use a 1x1 convolutional layer before the classification layer. + class_expand: int=2048. Number of channels for the last 1x1 convolutional layer. + dropout_prob: float. Parameters of dropout, 0.0 means dropout is not used. + class_num: int=1000. The number of classes. + Returns: + model: nn.Layer. Specific PPHGNet model depends on args. 
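+
+    Example:
+        A minimal usage sketch (assuming the PPHGNet_tiny constructor defined
+        later in this file):
+
+            model = PPHGNet_tiny(pretrained=False)
+            out = model(paddle.rand([1, 3, 224, 224]))  # logits, shape [1, 1000]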
+ """ + def __init__(self, + stem_channels, + stage_config, + layer_num, + use_last_conv=True, + class_expand=2048, + dropout_prob=0.0, + class_num=1000): + super().__init__() + self.use_last_conv = use_last_conv + self.class_expand = class_expand + + # stem + stem_channels.insert(0, 3) + self.stem = nn.Sequential(* [ + ConvBNAct( + in_channels=stem_channels[i], + out_channels=stem_channels[i + 1], + kernel_size=3, + stride=2 if i == 0 else 1) for i in range( + len(stem_channels) - 1) + ]) + self.pool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) + + # stages + self.stages = nn.LayerList() + for k in stage_config: + in_channels, mid_channels, out_channels, block_num, downsample = stage_config[ + k] + self.stages.append( + HG_Stage(in_channels, mid_channels, out_channels, block_num, + layer_num, downsample)) + + self.avg_pool = AdaptiveAvgPool2D(1) + if self.use_last_conv: + self.last_conv = Conv2D( + in_channels=out_channels, + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.act = nn.ReLU() + self.dropout = nn.Dropout( + p=dropout_prob, mode="downscale_in_infer") + + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + self.fc = nn.Linear(self.class_expand + if self.use_last_conv else out_channels, class_num) + + self._init_weights() + + def _init_weights(self): + for m in self.sublayers(): + if isinstance(m, nn.Conv2D): + kaiming_normal_(m.weight) + elif isinstance(m, (nn.BatchNorm2D)): + ones_(m.weight) + zeros_(m.bias) + elif isinstance(m, nn.Linear): + zeros_(m.bias) + + def forward(self, x): + x = self.stem(x) + x = self.pool(x) + + for stage in self.stages: + x = stage(x) + + x = self.avg_pool(x) + if self.use_last_conv: + x = self.last_conv(x) + x = self.act(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def PPHGNet_tiny(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNet_tiny + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_tiny` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [96, 96, 224, 1, False], + "stage2": [224, 128, 448, 1, True], + "stage3": [448, 160, 512, 2, True], + "stage4": [512, 192, 768, 1, True], + } + + model = PPHGNet( + stem_channels=[48, 48, 96], + stage_config=stage_config, + layer_num=5, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_tiny"], use_ssld) + return model + + +def PPHGNet_small(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNet_small + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_small` model depends on args. 
+ """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [128, 128, 256, 1, False], + "stage2": [256, 160, 512, 1, True], + "stage3": [512, 192, 768, 2, True], + "stage4": [768, 224, 1024, 1, True], + } + + model = PPHGNet( + stem_channels=[64, 64, 128], + stage_config=stage_config, + layer_num=6, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_small"], use_ssld) + return model + + +def PPHGNet_base(pretrained=False, use_ssld=True, **kwargs): + """ + PPHGNet_base + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_base` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [160, 192, 320, 1, False], + "stage2": [320, 224, 640, 2, True], + "stage3": [640, 256, 960, 3, True], + "stage4": [960, 288, 1280, 2, True], + } + + model = PPHGNet( + stem_channels=[96, 96, 160], + stage_config=stage_config, + layer_num=7, + dropout_prob=0.2, + **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPHGNet_base"], use_ssld) + return model diff --git a/ppcls/arch/backbone/legendary_models/pp_lcnet.py b/ppcls/arch/backbone/legendary_models/pp_lcnet.py index 40174622029c15d713525b3968ea2b3dd8a7239a..a4fe6fadb53b19176d03e529a00200a1570c1eed 100644 --- a/ppcls/arch/backbone/legendary_models/pp_lcnet.py +++ b/ppcls/arch/backbone/legendary_models/pp_lcnet.py @@ -17,7 +17,7 @@ from __future__ import absolute_import, division, print_function import paddle import paddle.nn as nn from paddle import ParamAttr -from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear +from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Dropout, Linear from paddle.regularizer import L2Decay from paddle.nn.initializer import KaimingNormal from ppcls.arch.backbone.base.theseus_layer import TheseusLayer @@ -83,7 +83,8 @@ class ConvBNLayer(TheseusLayer): filter_size, num_filters, stride, - num_groups=1): + num_groups=1, + lr_mult=1.0): super().__init__() self.conv = Conv2D( @@ -93,13 +94,16 @@ class ConvBNLayer(TheseusLayer): stride=stride, padding=(filter_size - 1) // 2, groups=num_groups, - weight_attr=ParamAttr(initializer=KaimingNormal()), + weight_attr=ParamAttr( + initializer=KaimingNormal(), learning_rate=lr_mult), bias_attr=False) - self.bn = BatchNorm( + self.bn = BatchNorm2D( num_filters, - param_attr=ParamAttr(regularizer=L2Decay(0.0)), - bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + weight_attr=ParamAttr( + regularizer=L2Decay(0.0), learning_rate=lr_mult), + bias_attr=ParamAttr( + regularizer=L2Decay(0.0), learning_rate=lr_mult)) self.hardswish = nn.Hardswish() def forward(self, x): @@ -115,7 +119,8 @@ class DepthwiseSeparable(TheseusLayer): num_filters, stride, dw_size=3, - use_se=False): + use_se=False, + lr_mult=1.0): super().__init__() self.use_se = use_se self.dw_conv = ConvBNLayer( @@ -123,14 +128,16 @@ class DepthwiseSeparable(TheseusLayer): num_filters=num_channels, filter_size=dw_size, stride=stride, - num_groups=num_channels) + num_groups=num_channels, + lr_mult=lr_mult) if use_se: - self.se = SEModule(num_channels) + self.se = SEModule(num_channels, lr_mult=lr_mult) self.pw_conv = ConvBNLayer( num_channels=num_channels, filter_size=1, num_filters=num_filters, - stride=1) + stride=1, + lr_mult=lr_mult) def 
forward(self, x): x = self.dw_conv(x) @@ -141,7 +148,7 @@ class DepthwiseSeparable(TheseusLayer): class SEModule(TheseusLayer): - def __init__(self, channel, reduction=4): + def __init__(self, channel, reduction=4, lr_mult=1.0): super().__init__() self.avg_pool = AdaptiveAvgPool2D(1) self.conv1 = Conv2D( @@ -149,14 +156,18 @@ class SEModule(TheseusLayer): out_channels=channel // reduction, kernel_size=1, stride=1, - padding=0) + padding=0, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult)) self.relu = nn.ReLU() self.conv2 = Conv2D( in_channels=channel // reduction, out_channels=channel, kernel_size=1, stride=1, - padding=0) + padding=0, + weight_attr=ParamAttr(learning_rate=lr_mult), + bias_attr=ParamAttr(learning_rate=lr_mult)) self.hardsigmoid = nn.Hardsigmoid() def forward(self, x): @@ -177,83 +188,125 @@ class PPLCNet(TheseusLayer): class_num=1000, dropout_prob=0.2, class_expand=1280, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + stride_list=[2, 2, 2, 2, 2], + use_last_conv=True, return_patterns=None, - return_stages=None): + return_stages=None, + **kwargs): super().__init__() self.scale = scale self.class_expand = class_expand - + self.lr_mult_list = lr_mult_list + self.use_last_conv = use_last_conv + self.stride_list = stride_list + self.net_config = NET_CONFIG + if isinstance(self.lr_mult_list, str): + self.lr_mult_list = eval(self.lr_mult_list) + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 6, "lr_mult_list length should be 6 but got {}".format( + len(self.lr_mult_list)) + + assert isinstance(self.stride_list, ( + list, tuple + )), "stride_list should be in (list, tuple) but got {}".format( + type(self.stride_list)) + assert len(self.stride_list + ) == 5, "stride_list length should be 5 but got {}".format( + len(self.stride_list)) + + for i, stride in enumerate(stride_list[1:]): + self.net_config["blocks{}".format(i + 3)][0][3] = stride self.conv1 = ConvBNLayer( num_channels=3, filter_size=3, num_filters=make_divisible(16 * scale), - stride=2) + stride=stride_list[0], + lr_mult=self.lr_mult_list[0]) - self.blocks2 = nn.Sequential(* [ + self.blocks2 = nn.Sequential(*[ DepthwiseSeparable( num_channels=make_divisible(in_c * scale), num_filters=make_divisible(out_c * scale), dw_size=k, stride=s, - use_se=se) - for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"]) + use_se=se, + lr_mult=self.lr_mult_list[1]) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks2"]) ]) - self.blocks3 = nn.Sequential(* [ + self.blocks3 = nn.Sequential(*[ DepthwiseSeparable( num_channels=make_divisible(in_c * scale), num_filters=make_divisible(out_c * scale), dw_size=k, stride=s, - use_se=se) - for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks3"]) + use_se=se, + lr_mult=self.lr_mult_list[2]) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks3"]) ]) - self.blocks4 = nn.Sequential(* [ + self.blocks4 = nn.Sequential(*[ DepthwiseSeparable( num_channels=make_divisible(in_c * scale), num_filters=make_divisible(out_c * scale), dw_size=k, stride=s, - use_se=se) - for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks4"]) + use_se=se, + lr_mult=self.lr_mult_list[3]) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks4"]) ]) - self.blocks5 = nn.Sequential(* [ + self.blocks5 = nn.Sequential(*[ DepthwiseSeparable( 
num_channels=make_divisible(in_c * scale), num_filters=make_divisible(out_c * scale), dw_size=k, stride=s, - use_se=se) - for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks5"]) + use_se=se, + lr_mult=self.lr_mult_list[4]) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks5"]) ]) - self.blocks6 = nn.Sequential(* [ + self.blocks6 = nn.Sequential(*[ DepthwiseSeparable( num_channels=make_divisible(in_c * scale), num_filters=make_divisible(out_c * scale), dw_size=k, stride=s, - use_se=se) - for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks6"]) + use_se=se, + lr_mult=self.lr_mult_list[5]) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks6"]) ]) self.avg_pool = AdaptiveAvgPool2D(1) - - self.last_conv = Conv2D( - in_channels=make_divisible(NET_CONFIG["blocks6"][-1][2] * scale), - out_channels=self.class_expand, - kernel_size=1, - stride=1, - padding=0, - bias_attr=False) - - self.hardswish = nn.Hardswish() - self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer") + if self.use_last_conv: + self.last_conv = Conv2D( + in_channels=make_divisible(self.net_config["blocks6"][-1][2] * + scale), + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.hardswish = nn.Hardswish() + self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer") + else: + self.last_conv = None self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) - - self.fc = Linear(self.class_expand, class_num) + self.fc = Linear( + self.class_expand if self.use_last_conv else + make_divisible(self.net_config["blocks6"][-1][2]), class_num) super().init_res( stages_pattern, @@ -270,9 +323,10 @@ class PPLCNet(TheseusLayer): x = self.blocks6(x) x = self.avg_pool(x) - x = self.last_conv(x) - x = self.hardswish(x) - x = self.dropout(x) + if self.last_conv is not None: + x = self.last_conv(x) + x = self.hardswish(x) + x = self.dropout(x) x = self.flatten(x) x = self.fc(x) return x diff --git a/ppcls/arch/backbone/legendary_models/pp_lcnet_v2.py b/ppcls/arch/backbone/legendary_models/pp_lcnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..459d84275ac63af54fb9ad10af2bcf2f7759052d --- /dev/null +++ b/ppcls/arch/backbone/legendary_models/pp_lcnet_v2.py @@ -0,0 +1,352 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
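+
+# PP-LCNetV2 backbone. The core block is RepDepthwiseSeparable: when use_rep
+# is enabled, several depthwise branches with decreasing kernel sizes are
+# trained in parallel, and rep() can later fuse them into a single depthwise
+# conv by folding each branch's BN into its kernel, zero-padding the kernels
+# to a common size and summing them (structural re-parameterization).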
+ +from __future__ import absolute_import, division, print_function + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle import ParamAttr +from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Dropout, Linear +from paddle.regularizer import L2Decay +from paddle.nn.initializer import KaimingNormal +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "PPLCNetV2_base": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + +NET_CONFIG = { + # in_channels, kernel_size, split_pw, use_rep, use_se, use_shortcut + "stage1": [64, 3, False, False, False, False], + "stage2": [128, 3, False, False, False, False], + "stage3": [256, 5, True, True, True, False], + "stage4": [512, 5, False, True, False, True], +} + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNLayer(TheseusLayer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + use_act=True): + super().__init__() + self.use_act = use_act + self.conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + weight_attr=ParamAttr(initializer=KaimingNormal()), + bias_attr=False) + + self.bn = BatchNorm2D( + out_channels, + weight_attr=ParamAttr(regularizer=L2Decay(0.0)), + bias_attr=ParamAttr(regularizer=L2Decay(0.0))) + if self.use_act: + self.act = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.use_act: + x = self.act(x) + return x + + +class SEModule(TheseusLayer): + def __init__(self, channel, reduction=4): + super().__init__() + self.avg_pool = AdaptiveAvgPool2D(1) + self.conv1 = Conv2D( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0) + self.relu = nn.ReLU() + self.conv2 = Conv2D( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0) + self.hardsigmoid = nn.Sigmoid() + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.hardsigmoid(x) + x = paddle.multiply(x=identity, y=x) + return x + + +class RepDepthwiseSeparable(TheseusLayer): + def __init__(self, + in_channels, + out_channels, + stride, + dw_size=3, + split_pw=False, + use_rep=False, + use_se=False, + use_shortcut=False): + super().__init__() + self.is_repped = False + + self.dw_size = dw_size + self.split_pw = split_pw + self.use_rep = use_rep + self.use_se = use_se + self.use_shortcut = True if use_shortcut and stride == 1 and in_channels == out_channels else False + + if self.use_rep: + self.dw_conv_list = nn.LayerList() + for kernel_size in range(self.dw_size, 0, -2): + if kernel_size == 1 and stride != 1: + continue + dw_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + stride=stride, + groups=in_channels, + use_act=False) + self.dw_conv_list.append(dw_conv) + self.dw_conv = nn.Conv2D( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=dw_size, + stride=stride, + padding=(dw_size - 1) // 2, + groups=in_channels) + 
else: + self.dw_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=dw_size, + stride=stride, + groups=in_channels) + + self.act = nn.ReLU() + + if use_se: + self.se = SEModule(in_channels) + + if self.split_pw: + pw_ratio = 0.5 + self.pw_conv_1 = ConvBNLayer( + in_channels=in_channels, + kernel_size=1, + out_channels=int(out_channels * pw_ratio), + stride=1) + self.pw_conv_2 = ConvBNLayer( + in_channels=int(out_channels * pw_ratio), + kernel_size=1, + out_channels=out_channels, + stride=1) + else: + self.pw_conv = ConvBNLayer( + in_channels=in_channels, + kernel_size=1, + out_channels=out_channels, + stride=1) + + def forward(self, x): + if self.use_rep: + input_x = x + if self.is_repped: + x = self.act(self.dw_conv(x)) + else: + y = self.dw_conv_list[0](x) + for dw_conv in self.dw_conv_list[1:]: + y += dw_conv(x) + x = self.act(y) + else: + x = self.dw_conv(x) + + if self.use_se: + x = self.se(x) + if self.split_pw: + x = self.pw_conv_1(x) + x = self.pw_conv_2(x) + else: + x = self.pw_conv(x) + if self.use_shortcut: + x = x + input_x + return x + + def rep(self): + if self.use_rep: + self.is_repped = True + kernel, bias = self._get_equivalent_kernel_bias() + self.dw_conv.weight.set_value(kernel) + self.dw_conv.bias.set_value(bias) + + def _get_equivalent_kernel_bias(self): + kernel_sum = 0 + bias_sum = 0 + for dw_conv in self.dw_conv_list: + kernel, bias = self._fuse_bn_tensor(dw_conv) + kernel = self._pad_tensor(kernel, to_size=self.dw_size) + kernel_sum += kernel + bias_sum += bias + return kernel_sum, bias_sum + + def _fuse_bn_tensor(self, branch): + kernel = branch.conv.weight + running_mean = branch.bn._mean + running_var = branch.bn._variance + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn._epsilon + std = (running_var + eps).sqrt() + t = (gamma / std).reshape((-1, 1, 1, 1)) + return kernel * t, beta - running_mean * gamma / std + + def _pad_tensor(self, tensor, to_size): + from_size = tensor.shape[-1] + if from_size == to_size: + return tensor + pad = (to_size - from_size) // 2 + return F.pad(tensor, [pad, pad, pad, pad]) + + +class PPLCNetV2(TheseusLayer): + def __init__(self, + scale, + depths, + class_num=1000, + dropout_prob=0, + use_last_conv=True, + class_expand=1280): + super().__init__() + self.scale = scale + self.use_last_conv = use_last_conv + self.class_expand = class_expand + + self.stem = nn.Sequential(* [ + ConvBNLayer( + in_channels=3, + kernel_size=3, + out_channels=make_divisible(32 * scale), + stride=2), RepDepthwiseSeparable( + in_channels=make_divisible(32 * scale), + out_channels=make_divisible(64 * scale), + stride=1, + dw_size=3) + ]) + + # stages + self.stages = nn.LayerList() + for depth_idx, k in enumerate(NET_CONFIG): + in_channels, kernel_size, split_pw, use_rep, use_se, use_shortcut = NET_CONFIG[ + k] + self.stages.append( + nn.Sequential(* [ + RepDepthwiseSeparable( + in_channels=make_divisible((in_channels if i == 0 else + in_channels * 2) * scale), + out_channels=make_divisible(in_channels * 2 * scale), + stride=2 if i == 0 else 1, + dw_size=kernel_size, + split_pw=split_pw, + use_rep=use_rep, + use_se=use_se, + use_shortcut=use_shortcut) + for i in range(depths[depth_idx]) + ])) + + self.avg_pool = AdaptiveAvgPool2D(1) + + if self.use_last_conv: + self.last_conv = Conv2D( + in_channels=make_divisible(NET_CONFIG["stage4"][0] * 2 * + scale), + out_channels=self.class_expand, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False) + self.act = nn.ReLU() + self.dropout = 
Dropout(p=dropout_prob, mode="downscale_in_infer") + + self.flatten = nn.Flatten(start_axis=1, stop_axis=-1) + in_features = self.class_expand if self.use_last_conv else NET_CONFIG[ + "stage4"][0] * 2 * scale + self.fc = Linear(in_features, class_num) + + def forward(self, x): + x = self.stem(x) + for stage in self.stages: + x = stage(x) + x = self.avg_pool(x) + if self.use_last_conv: + x = self.last_conv(x) + x = self.act(x) + x = self.dropout(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." + ) + + +def PPLCNetV2_base(pretrained=False, use_ssld=False, **kwargs): + """ + PPLCNetV2_base + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPLCNetV2_base` model depends on args. + """ + model = PPLCNetV2( + scale=1.0, depths=[2, 2, 6, 2], dropout_prob=0.2, **kwargs) + _load_pretrained(pretrained, model, MODEL_URLS["PPLCNetV2_base"], use_ssld) + return model diff --git a/ppcls/arch/backbone/legendary_models/resnet.py b/ppcls/arch/backbone/legendary_models/resnet.py index 643e860faf022000453e00cad637ef1ad572e0dc..705511f5b5a8ed5aac45636dddb3598aefd4276a 100644 --- a/ppcls/arch/backbone/legendary_models/resnet.py +++ b/ppcls/arch/backbone/legendary_models/resnet.py @@ -20,11 +20,13 @@ import numpy as np import paddle from paddle import ParamAttr import paddle.nn as nn -from paddle.nn import Conv2D, BatchNorm, Linear +from paddle.nn import Conv2D, BatchNorm, Linear, BatchNorm2D from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D from paddle.nn.initializer import Uniform +from paddle.regularizer import L2Decay import math +from ppcls.utils import logger from ppcls.arch.backbone.base.theseus_layer import TheseusLayer from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url @@ -121,17 +123,18 @@ class ConvBNLayer(TheseusLayer): self.is_vd_mode = is_vd_mode self.act = act self.avg_pool = AvgPool2D( - kernel_size=2, stride=2, padding=0, ceil_mode=True) + kernel_size=2, stride=stride, padding="SAME", ceil_mode=True) self.conv = Conv2D( in_channels=num_channels, out_channels=num_filters, kernel_size=filter_size, - stride=stride, + stride=1 if is_vd_mode else stride, padding=(filter_size - 1) // 2, groups=groups, weight_attr=ParamAttr(learning_rate=lr_mult), bias_attr=False, data_format=data_format) + self.bn = BatchNorm( num_filters, param_attr=ParamAttr(learning_rate=lr_mult), @@ -159,7 +162,6 @@ class BottleneckBlock(TheseusLayer): lr_mult=1.0, data_format="NCHW"): super().__init__() - self.conv0 = ConvBNLayer( num_channels=num_channels, num_filters=num_filters, @@ -188,10 +190,11 @@ class BottleneckBlock(TheseusLayer): num_channels=num_channels, num_filters=num_filters * 4, filter_size=1, - stride=stride if if_first else 1, + stride=stride, is_vd_mode=False if if_first else True, lr_mult=lr_mult, data_format=data_format) + self.relu = nn.ReLU() self.shortcut = shortcut @@ -242,7 +245,7 @@ class BasicBlock(TheseusLayer): num_channels=num_channels, 
num_filters=num_filters, filter_size=1, - stride=stride if if_first else 1, + stride=stride, is_vd_mode=False if if_first else True, lr_mult=lr_mult, data_format=data_format) @@ -281,14 +284,17 @@ class ResNet(TheseusLayer): stem_act="relu", class_num=1000, lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + stride_list=[2, 2, 2, 2, 2], data_format="NCHW", input_image_channel=3, return_patterns=None, - return_stages=None): + return_stages=None, + **kargs): super().__init__() self.cfg = config self.lr_mult_list = lr_mult_list + self.stride_list = stride_list self.is_vd_mode = version == "vd" self.class_num = class_num self.num_filters = [64, 128, 256, 512] @@ -301,15 +307,25 @@ class ResNet(TheseusLayer): list, tuple )), "lr_mult_list should be in (list, tuple) but got {}".format( type(self.lr_mult_list)) - assert len(self.lr_mult_list - ) == 5, "lr_mult_list length should be 5 but got {}".format( - len(self.lr_mult_list)) + if len(self.lr_mult_list) != 5: + msg = "lr_mult_list length should be 5 but got {}, default lr_mult_list used".format( + len(self.lr_mult_list)) + logger.warning(msg) + self.lr_mult_list = [1.0, 1.0, 1.0, 1.0, 1.0] + + assert isinstance(self.stride_list, ( + list, tuple + )), "stride_list should be in (list, tuple) but got {}".format( + type(self.stride_list)) + assert len(self.stride_list + ) == 5, "stride_list length should be 5 but got {}".format( + len(self.stride_list)) self.stem_cfg = { #num_channels, num_filters, filter_size, stride - "vb": [[input_image_channel, 64, 7, 2]], - "vd": - [[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]] + "vb": [[input_image_channel, 64, 7, self.stride_list[0]]], + "vd": [[input_image_channel, 32, 3, self.stride_list[0]], + [32, 32, 3, 1], [32, 64, 3, 1]] } self.stem = nn.Sequential(*[ @@ -325,7 +341,10 @@ class ResNet(TheseusLayer): ]) self.max_pool = MaxPool2D( - kernel_size=3, stride=2, padding=1, data_format=data_format) + kernel_size=3, + stride=stride_list[1], + padding=1, + data_format=data_format) block_list = [] for block_idx in range(len(self.block_depth)): shortcut = False @@ -334,7 +353,8 @@ class ResNet(TheseusLayer): num_channels=self.num_channels[block_idx] if i == 0 else self.num_filters[block_idx] * self.channels_mult, num_filters=self.num_filters[block_idx], - stride=2 if i == 0 and block_idx != 0 else 1, + stride=self.stride_list[block_idx + 1] + if i == 0 and block_idx != 0 else 1, shortcut=shortcut, if_first=block_idx == i == 0 if version == "vd" else True, lr_mult=self.lr_mult_list[block_idx + 1], @@ -378,7 +398,10 @@ def _load_pretrained(pretrained, model, model_url, use_ssld): elif pretrained is True: load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) elif isinstance(pretrained, str): - load_dygraph_pretrain(model, pretrained) + if 'http' in pretrained: + load_dygraph_pretrain_from_url(model, pretrained, use_ssld=False) + else: + load_dygraph_pretrain(model, pretrained) else: raise RuntimeError( "pretrained type is not available. Please use `string` or `boolean` type." diff --git a/ppcls/arch/backbone/legendary_models/swin_transformer.py b/ppcls/arch/backbone/legendary_models/swin_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..c951150151aeaa6d83a9d31d36cef3c6dae88455 --- /dev/null +++ b/ppcls/arch/backbone/legendary_models/swin_transformer.py @@ -0,0 +1,879 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Code was based on https://github.com/microsoft/Swin-Transformer +# reference: https://arxiv.org/abs/2103.14030 + +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.initializer import TruncatedNormal, Constant + +from ppcls.arch.backbone.base.theseus_layer import TheseusLayer +from ppcls.arch.backbone.model_zoo.vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity +from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url + +MODEL_URLS = { + "SwinTransformer_tiny_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_small_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_small_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_base_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams", + "SwinTransformer_base_patch4_window12_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window12_384_pretrained.pdparams", + "SwinTransformer_large_patch4_window7_224": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window7_224_22kto1k_pretrained.pdparams", + "SwinTransformer_large_patch4_window12_384": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams", +} + +__all__ = list(MODEL_URLS.keys()) + + +class Mlp(nn.Layer): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.reshape( + [B, H // window_size, window_size, W // window_size, window_size, C]) + windows = x.transpose([0, 1, 3, 2, 4, 5]).reshape( + [-1, window_size, window_size, C]) + return windows + + +def window_reverse(windows, window_size, H, W, C): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + x = windows.reshape( + [-1, H // window_size, W // window_size, window_size, window_size, C]) + x = x.transpose([0, 1, 3, 2, 4, 5]).reshape([-1, H, W, C]) + 
return x + + +class WindowAttention(nn.Layer): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__(self, + dim, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + # define a parameter table of relative position bias + # 2*Wh-1 * 2*Ww-1, nH + self.relative_position_bias_table = self.create_parameter( + shape=((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads), + default_initializer=zeros_) + self.add_parameter("relative_position_bias_table", + self.relative_position_bias_table) + + # get pair-wise relative position index for each token inside the window + coords_h = paddle.arange(self.window_size[0]) + coords_w = paddle.arange(self.window_size[1]) + coords = paddle.stack(paddle.meshgrid( + [coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww + + coords_flatten_1 = coords_flatten.unsqueeze(axis=2) + coords_flatten_2 = coords_flatten.unsqueeze(axis=1) + relative_coords = coords_flatten_1 - coords_flatten_2 + + relative_coords = relative_coords.transpose( + [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[ + 0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + + self.register_buffer("relative_position_index", + relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table) + self.softmax = nn.Softmax(axis=-1) + + def eval(self, ): + # this is used to re-param swin for model export + relative_position_bias_table = self.relative_position_bias_table + window_size = self.window_size + index = self.relative_position_index.reshape([-1]) + + relative_position_bias = paddle.index_select( + relative_position_bias_table, index) + relative_position_bias = relative_position_bias.reshape([ + window_size[0] * window_size[1], window_size[0] * window_size[1], + -1 + ]) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.transpose( + [2, 0, 1]) # nH, Wh*Ww, Wh*Ww + relative_position_bias = relative_position_bias.unsqueeze(0) + self.register_buffer("relative_position_bias", relative_position_bias) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape( + [B_, N, 3, self.num_heads, C // self.num_heads]).transpose( + [2, 0, 3, 1, 4]) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * 
+    def forward(self, x, mask=None):
+        """
+        Args:
+            x: input features with shape of (num_windows*B, N, C)
+            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+        """
+        B_, N, C = x.shape
+        qkv = self.qkv(x).reshape(
+            [B_, N, 3, self.num_heads, C // self.num_heads]).transpose(
+                [2, 0, 3, 1, 4])
+        q, k, v = qkv[0], qkv[1], qkv[2]
+
+        q = q * self.scale
+        attn = paddle.mm(q, k.transpose([0, 1, 3, 2]))
+
+        if self.training or not hasattr(self, "relative_position_bias"):
+            index = self.relative_position_index.reshape([-1])
+
+            relative_position_bias = paddle.index_select(
+                self.relative_position_bias_table, index)
+            relative_position_bias = relative_position_bias.reshape([
+                self.window_size[0] * self.window_size[1],
+                self.window_size[0] * self.window_size[1], -1
+            ])  # Wh*Ww,Wh*Ww,nH
+
+            relative_position_bias = relative_position_bias.transpose(
+                [2, 0, 1])  # nH, Wh*Ww, Wh*Ww
+            attn = attn + relative_position_bias.unsqueeze(0)
+        else:
+            attn = attn + self.relative_position_bias
+
+        if mask is not None:
+            nW = mask.shape[0]
+            attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N
+                                 ]) + mask.unsqueeze(1).unsqueeze(0)
+            attn = attn.reshape([-1, self.num_heads, N, N])
+            attn = self.softmax(attn)
+        else:
+            attn = self.softmax(attn)
+
+        attn = self.attn_drop(attn)
+
+        # x = (attn @ v).transpose(1, 2).reshape([B_, N, C])
+        x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([B_, N, C])
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+    def extra_repr(self):
+        return "dim={}, window_size={}, num_heads={}".format(
+            self.dim, self.window_size, self.num_heads)
+
+    def flops(self, N):
+        # calculate flops for 1 window with token length of N
+        flops = 0
+        # qkv = self.qkv(x)
+        flops += N * self.dim * 3 * self.dim
+        # attn = (q @ k.transpose(-2, -1))
+        flops += self.num_heads * N * (self.dim // self.num_heads) * N
+        # x = (attn @ v)
+        flops += self.num_heads * N * N * (self.dim // self.num_heads)
+        # x = self.proj(x)
+        flops += N * self.dim * self.dim
+        return flops
+
+
+class SwinTransformerBlock(nn.Layer):
+    r""" Swin Transformer Block.
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        num_heads (int): Number of attention heads.
+        window_size (int): Window size.
+        shift_size (int): Shift size for SW-MSA.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float, optional): Stochastic depth rate. Default: 0.0
+        act_layer (nn.Layer, optional): Activation layer. Default: nn.GELU
+        norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
+    """
+
+    def __init__(self,
+                 dim,
+                 input_resolution,
+                 num_heads,
+                 window_size=7,
+                 shift_size=0,
+                 mlp_ratio=4.,
+                 qkv_bias=True,
+                 qk_scale=None,
+                 drop=0.,
+                 attn_drop=0.,
+                 drop_path=0.,
+                 act_layer=nn.GELU,
+                 norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.dim = dim
+        self.input_resolution = input_resolution
+        self.num_heads = num_heads
+        self.window_size = window_size
+        self.shift_size = shift_size
+        self.mlp_ratio = mlp_ratio
+        if min(self.input_resolution) <= self.window_size:
+            # if window size is larger than input resolution, we don't partition windows
+            self.shift_size = 0
+            self.window_size = min(self.input_resolution)
+        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
+
+        self.norm1 = norm_layer(dim)
+        self.attn = WindowAttention(
+            dim,
+            window_size=to_2tuple(self.window_size),
+            num_heads=num_heads,
+            qkv_bias=qkv_bias,
+            qk_scale=qk_scale,
+            attn_drop=attn_drop,
+            proj_drop=drop)
+
+        self.drop_path = DropPath(drop_path) if drop_path > 0.
else Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = paddle.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.reshape( + [-1, self.window_size * self.window_size]) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + + huns = -100.0 * paddle.ones_like(attn_mask) + attn_mask = huns * (attn_mask != 0).astype("float32") + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.reshape([B, H, W, C]) + + # cyclic shift + if self.shift_size > 0: + shifted_x = paddle.roll( + x, shifts=(-self.shift_size, -self.shift_size), axis=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.reshape( + [-1, self.window_size * self.window_size, + C]) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn( + x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.reshape( + [-1, self.window_size, self.window_size, C]) + shifted_x = window_reverse(attn_windows, self.window_size, H, W, + C) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = paddle.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + axis=(1, 2)) + else: + x = shifted_x + x = x.reshape([B, H * W, C]) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self): + return "dim={}, input_resolution={}, num_heads={}, window_size={}, shift_size={}, mlp_ratio={}".format( + self.dim, self.input_resolution, self.num_heads, self.window_size, + self.shift_size, self.mlp_ratio) + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Layer): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Layer, optional): Normalization layer. 
Default: nn.LayerNorm
+    """
+
+    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.input_resolution = input_resolution
+        self.dim = dim
+        self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False)
+        self.norm = norm_layer(4 * dim)
+
+    def forward(self, x):
+        """
+        x: B, H*W, C
+        """
+        H, W = self.input_resolution
+        B, L, C = x.shape
+        assert L == H * W, "input feature has wrong size"
+        assert H % 2 == 0 and W % 2 == 0, "x size ({}*{}) is not even.".format(
+            H, W)
+
+        x = x.reshape([B, H, W, C])
+
+        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
+        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
+        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
+        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
+        x = paddle.concat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
+        x = x.reshape([B, H * W // 4, 4 * C])  # B H/2*W/2 4*C
+
+        x = self.norm(x)
+        x = self.reduction(x)
+
+        return x
+
+    def extra_repr(self):
+        return "input_resolution={}, dim={}".format(self.input_resolution,
+                                                    self.dim)
+
+    def flops(self):
+        H, W = self.input_resolution
+        flops = H * W * self.dim
+        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
+        return flops
+
+
+class BasicLayer(nn.Layer):
+    """ A basic Swin Transformer layer for one stage.
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        depth (int): Number of blocks.
+        num_heads (int): Number of attention heads.
+        window_size (int): Local window size.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+        norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm
+        downsample (nn.Layer | None, optional): Downsample layer at the end of the layer. Default: None
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+ """ + + def __init__(self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.LayerList([ + SwinTransformerBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) for i in range(depth) + ]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self): + return "dim={}, input_resolution={}, depth={}".format( + self.dim, self.input_resolution, self.depth) + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class PatchEmbed(nn.Layer): + """ Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Layer, optional): Normalization layer. Default: None + """ + + def __init__(self, + img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2D( + in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # TODO (littletomatodonkey), uncomment the line will cause failure of jit.save + # assert [H, W] == self.img_size[:2], "Input image size ({H}*{W}) doesn't match model ({}*{}).".format(H, W, self.img_size[0], self.img_size[1]) + x = self.proj(x) + + x = x.flatten(2).transpose([0, 2, 1]) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * ( + self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformer(TheseusLayer): + """ Swin Transformer + A PaddlePaddle impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. 
Default: 4
+        in_chans (int): Number of input image channels. Default: 3
+        class_num (int): Number of classes for classification head. Default: 1000
+        embed_dim (int): Patch embedding dimension. Default: 96
+        depths (tuple(int)): Depth of each Swin Transformer layer.
+        num_heads (tuple(int)): Number of attention heads in different layers.
+        window_size (int): Window size. Default: 7
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
+        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
+        drop_rate (float): Dropout rate. Default: 0
+        attn_drop_rate (float): Attention dropout rate. Default: 0
+        drop_path_rate (float): Stochastic depth rate. Default: 0.1
+        norm_layer (nn.Layer): Normalization layer. Default: nn.LayerNorm.
+        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
+        patch_norm (bool): If True, add normalization after patch embedding. Default: True
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
+    """
+
+    def __init__(self,
+                 img_size=224,
+                 patch_size=4,
+                 in_chans=3,
+                 class_num=1000,
+                 embed_dim=96,
+                 depths=[2, 2, 6, 2],
+                 num_heads=[3, 6, 12, 24],
+                 window_size=7,
+                 mlp_ratio=4.,
+                 qkv_bias=True,
+                 qk_scale=None,
+                 drop_rate=0.,
+                 attn_drop_rate=0.,
+                 drop_path_rate=0.1,
+                 norm_layer=nn.LayerNorm,
+                 ape=False,
+                 patch_norm=True,
+                 use_checkpoint=False,
+                 **kwargs):
+        super(SwinTransformer, self).__init__()
+
+        self.num_classes = num_classes = class_num
+        self.num_layers = len(depths)
+        self.embed_dim = embed_dim
+        self.ape = ape
+        self.patch_norm = patch_norm
+        self.num_features = int(embed_dim * 2**(self.num_layers - 1))
+        self.mlp_ratio = mlp_ratio
+
+        # split image into non-overlapping patches
+        self.patch_embed = PatchEmbed(
+            img_size=img_size,
+            patch_size=patch_size,
+            in_chans=in_chans,
+            embed_dim=embed_dim,
+            norm_layer=norm_layer if self.patch_norm else None)
+        num_patches = self.patch_embed.num_patches
+        patches_resolution = self.patch_embed.patches_resolution
+        self.patches_resolution = patches_resolution
+
+        # absolute position embedding
+        if self.ape:
+            self.absolute_pos_embed = self.create_parameter(
+                shape=(1, num_patches, embed_dim), default_initializer=zeros_)
+            self.add_parameter("absolute_pos_embed", self.absolute_pos_embed)
+            trunc_normal_(self.absolute_pos_embed)
+
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        # stochastic depth
+        dpr = np.linspace(0, drop_path_rate,
+                          sum(depths)).tolist()  # stochastic depth decay rule
+
+        # build layers
+        self.layers = nn.LayerList()
+        for i_layer in range(self.num_layers):
+            layer = BasicLayer(
+                dim=int(embed_dim * 2**i_layer),
+                input_resolution=(patches_resolution[0] // (2**i_layer),
+                                  patches_resolution[1] // (2**i_layer)),
+                depth=depths[i_layer],
+                num_heads=num_heads[i_layer],
+                window_size=window_size,
+                mlp_ratio=self.mlp_ratio,
+                qkv_bias=qkv_bias,
+                qk_scale=qk_scale,
+                drop=drop_rate,
+                attn_drop=attn_drop_rate,
+                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
+                norm_layer=norm_layer,
+                downsample=PatchMerging
+                if (i_layer < self.num_layers - 1) else None,
+                use_checkpoint=use_checkpoint)
+            self.layers.append(layer)
+
+        self.norm = norm_layer(self.num_features)
+        self.avgpool = nn.AdaptiveAvgPool1D(1)
+        self.head = nn.Linear(
+            self.num_features,
+            num_classes) if self.num_classes > 0 else nn.Identity()
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                zeros_(m.bias)
+        elif isinstance(m, nn.LayerNorm):
+            zeros_(m.bias)
+            ones_(m.weight)
+
+    def forward_features(self, x):
+        x = self.patch_embed(x)
+        if self.ape:
+            x = x + self.absolute_pos_embed
+        x = self.pos_drop(x)
+
+        for layer in self.layers:
+            x = layer(x)
+
+        x = self.norm(x)  # B L C
+        x = self.avgpool(x.transpose([0, 2, 1]))  # B C 1
+        x = paddle.flatten(x, 1)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.head(x)
+        return x
+
+    def flops(self):
+        flops = 0
+        flops += self.patch_embed.flops()
+        for _, layer in enumerate(self.layers):
+            flops += layer.flops()
+        flops += self.num_features * self.patches_resolution[
+            0] * self.patches_resolution[1] // (2**self.num_layers)
+        flops += self.num_features * self.num_classes
+        return flops
+
+
+def _load_pretrained(pretrained, model, model_url, use_ssld=False):
+    if pretrained is False:
+        pass
+    elif pretrained is True:
+        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
+    elif isinstance(pretrained, str):
+        load_dygraph_pretrain(model, pretrained)
+    else:
+        raise RuntimeError(
+            "pretrained type is not available. Please use `string` or `boolean` type."
+        )
+
+
+def SwinTransformer_tiny_patch4_window7_224(pretrained=False,
+                                            use_ssld=False,
+                                            **kwargs):
+    model = SwinTransformer(
+        embed_dim=96,
+        depths=[2, 2, 6, 2],
+        num_heads=[3, 6, 12, 24],
+        window_size=7,
+        drop_path_rate=0.2,
+        **kwargs)
+    _load_pretrained(
+        pretrained,
+        model,
+        MODEL_URLS["SwinTransformer_tiny_patch4_window7_224"],
+        use_ssld=use_ssld)
+    return model
+
+
+def SwinTransformer_small_patch4_window7_224(pretrained=False,
+                                             use_ssld=False,
+                                             **kwargs):
+    model = SwinTransformer(
+        embed_dim=96,
+        depths=[2, 2, 18, 2],
+        num_heads=[3, 6, 12, 24],
+        window_size=7,
+        **kwargs)
+    _load_pretrained(
+        pretrained,
+        model,
+        MODEL_URLS["SwinTransformer_small_patch4_window7_224"],
+        use_ssld=use_ssld)
+    return model
+
+
+def SwinTransformer_base_patch4_window7_224(pretrained=False,
+                                            use_ssld=False,
+                                            **kwargs):
+    model = SwinTransformer(
+        embed_dim=128,
+        depths=[2, 2, 18, 2],
+        num_heads=[4, 8, 16, 32],
+        window_size=7,
+        drop_path_rate=0.5,
+        **kwargs)
+    _load_pretrained(
+        pretrained,
+        model,
+        MODEL_URLS["SwinTransformer_base_patch4_window7_224"],
+        use_ssld=use_ssld)
+    return model
+
+
+def SwinTransformer_base_patch4_window12_384(pretrained=False,
+                                             use_ssld=False,
+                                             **kwargs):
+    model = SwinTransformer(
+        img_size=384,
+        embed_dim=128,
+        depths=[2, 2, 18, 2],
+        num_heads=[4, 8, 16, 32],
+        window_size=12,
+        drop_path_rate=0.5,  # NOTE: does not appear in the official code
+        **kwargs)
+    _load_pretrained(
+        pretrained,
+        model,
+        MODEL_URLS["SwinTransformer_base_patch4_window12_384"],
+        use_ssld=use_ssld)
+    return model
+
+
+def SwinTransformer_large_patch4_window7_224(pretrained=False,
+                                             use_ssld=False,
+                                             **kwargs):
+    model = SwinTransformer(
+        embed_dim=192,
+        depths=[2, 2, 18, 2],
+        num_heads=[6, 12, 24, 48],
+        window_size=7,
+        **kwargs)
+    _load_pretrained(
+        pretrained,
+        model,
+        MODEL_URLS["SwinTransformer_large_patch4_window7_224"],
+        use_ssld=use_ssld)
+    return model
+
+
+def SwinTransformer_large_patch4_window12_384(pretrained=False,
+                                              use_ssld=False,
+                                              **kwargs):
+    model = SwinTransformer(
+        img_size=384,
+        embed_dim=192,
+        depths=[2, 2, 18, 2],
+        num_heads=[6, 12, 24, 48],
+        window_size=12,
+        **kwargs)
+    _load_pretrained(
+        pretrained,
+        model,
+        MODEL_URLS["SwinTransformer_large_patch4_window12_384"],
+        use_ssld=use_ssld)
+    return model
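+
+
+# Editorial usage sketch (not part of the original file; the import path is an
+# assumption and depends on where this module is registered in the package):
+#
+#     import paddle
+#     from ppcls.arch.backbone import SwinTransformer_tiny_patch4_window7_224
+#
+#     model = SwinTransformer_tiny_patch4_window7_224(pretrained=False)
+#     x = paddle.randn([1, 3, 224, 224])
+#     logits = model(x)  # shape [1, 1000]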
diff --git a/ppcls/arch/backbone/model_zoo/adaface_ir_net.py b/ppcls/arch/backbone/model_zoo/adaface_ir_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..47de152b646e6f824e5a888692b770d9e146223b
--- /dev/null
+++ b/ppcls/arch/backbone/model_zoo/adaface_ir_net.py
@@ -0,0 +1,529 @@
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# this code is based on AdaFace(https://github.com/mk-minchul/AdaFace)
+from collections import namedtuple
+import paddle
+import paddle.nn as nn
+from paddle.nn import Dropout
+from paddle.nn import MaxPool2D
+from paddle.nn import Sequential
+from paddle.nn import Conv2D, Linear
+from paddle.nn import BatchNorm1D, BatchNorm2D
+from paddle.nn import ReLU, Sigmoid
+from paddle.nn import Layer
+from paddle.nn import PReLU
+
+# from ppcls.arch.backbone.legendary_models.resnet import _load_pretrained
+
+
+class Flatten(Layer):
+    """ Flat tensor
+    """
+
+    def forward(self, input):
+        return paddle.reshape(input, [input.shape[0], -1])
+
+
+class LinearBlock(Layer):
+    """ Convolution block without a non-linear activation layer
+    """
+
+    def __init__(self,
+                 in_c,
+                 out_c,
+                 kernel=(1, 1),
+                 stride=(1, 1),
+                 padding=(0, 0),
+                 groups=1):
+        super(LinearBlock, self).__init__()
+        self.conv = Conv2D(
+            in_c,
+            out_c,
+            kernel,
+            stride,
+            padding,
+            groups=groups,
+            weight_attr=nn.initializer.KaimingNormal(),
+            bias_attr=None)
+        weight_attr = paddle.ParamAttr(
+            regularizer=None, initializer=nn.initializer.Constant(value=1.0))
+        bias_attr = paddle.ParamAttr(
+            regularizer=None, initializer=nn.initializer.Constant(value=0.0))
+        self.bn = BatchNorm2D(
+            out_c, weight_attr=weight_attr, bias_attr=bias_attr)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        return x
+
+
+class GNAP(Layer):
+    """ Global Norm-Aware Pooling block
+    """
+
+    def __init__(self, in_c):
+        super(GNAP, self).__init__()
+        self.bn1 = BatchNorm2D(in_c, weight_attr=False, bias_attr=False)
+        self.pool = nn.AdaptiveAvgPool2D((1, 1))
+        self.bn2 = BatchNorm1D(in_c, weight_attr=False, bias_attr=False)
+
+    def forward(self, x):
+        x = self.bn1(x)
+        x_norm = paddle.norm(x, 2, 1, True)
+        x_norm_mean = paddle.mean(x_norm)
+        weight = x_norm_mean / x_norm
+        x = x * weight
+        x = self.pool(x)
+        # paddle.Tensor has no `.view`; reshape to [N, C] instead
+        x = x.reshape([x.shape[0], -1])
+        feature = self.bn2(x)
+        return feature
+
+
+class GDC(Layer):
+    """ Global Depthwise Convolution block
+    """
+
+    def __init__(self, in_c, embedding_size):
+        super(GDC, self).__init__()
+        self.conv_6_dw = LinearBlock(
+            in_c,
+            in_c,
+            groups=in_c,
+            kernel=(7, 7),
+            stride=(1, 1),
+            padding=(0, 0))
+        self.conv_6_flatten = Flatten()
+        self.linear = Linear(
+            in_c,
+            embedding_size,
+            weight_attr=nn.initializer.KaimingNormal(),
+            bias_attr=False)
+        self.bn = BatchNorm1D(
+            embedding_size, weight_attr=False, bias_attr=False)
+
+    def forward(self, x):
+        x = self.conv_6_dw(x)
+        x = self.conv_6_flatten(x)
+        x = self.linear(x)
+        x = self.bn(x)
+        return x
+
+
+class SELayer(Layer):
+    """
SE block + """ + + def __init__(self, channels, reduction): + super(SELayer, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2D(1) + weight_attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.XavierUniform()) + self.fc1 = Conv2D( + channels, + channels // reduction, + kernel_size=1, + padding=0, + weight_attr=weight_attr, + bias_attr=False) + + self.relu = ReLU() + self.fc2 = Conv2D( + channels // reduction, + channels, + kernel_size=1, + padding=0, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False) + + self.sigmoid = Sigmoid() + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + + return module_input * x + + +class BasicBlockIR(Layer): + """ BasicBlock for IRNet + """ + + def __init__(self, in_channel, depth, stride): + super(BasicBlockIR, self).__init__() + + weight_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=0.0)) + if in_channel == depth: + self.shortcut_layer = MaxPool2D(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2D( + in_channel, + depth, (1, 1), + stride, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + self.res_layer = Sequential( + BatchNorm2D( + in_channel, weight_attr=weight_attr, bias_attr=bias_attr), + Conv2D( + in_channel, + depth, (3, 3), (1, 1), + 1, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr), + PReLU(depth), + Conv2D( + depth, + depth, (3, 3), + stride, + 1, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + + return res + shortcut + + +class BottleneckIR(Layer): + """ BasicBlock with bottleneck for IRNet + """ + + def __init__(self, in_channel, depth, stride): + super(BottleneckIR, self).__init__() + reduction_channel = depth // 4 + weight_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=1.0)) + bias_attr = paddle.ParamAttr( + regularizer=None, initializer=nn.initializer.Constant(value=0.0)) + if in_channel == depth: + self.shortcut_layer = MaxPool2D(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2D( + in_channel, + depth, (1, 1), + stride, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + self.res_layer = Sequential( + BatchNorm2D( + in_channel, weight_attr=weight_attr, bias_attr=bias_attr), + Conv2D( + in_channel, + reduction_channel, (1, 1), (1, 1), + 0, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + reduction_channel, + weight_attr=weight_attr, + bias_attr=bias_attr), + PReLU(reduction_channel), + Conv2D( + reduction_channel, + reduction_channel, (3, 3), (1, 1), + 1, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + reduction_channel, + weight_attr=weight_attr, + bias_attr=bias_attr), + PReLU(reduction_channel), + Conv2D( + reduction_channel, + depth, (1, 1), + stride, + 0, + weight_attr=nn.initializer.KaimingNormal(), + bias_attr=False), + BatchNorm2D( + depth, weight_attr=weight_attr, bias_attr=bias_attr)) + + def forward(self, 
x):
+        shortcut = self.shortcut_layer(x)
+        res = self.res_layer(x)
+
+        return res + shortcut
+
+
+class BasicBlockIRSE(BasicBlockIR):
+    def __init__(self, in_channel, depth, stride):
+        super(BasicBlockIRSE, self).__init__(in_channel, depth, stride)
+        self.res_layer.add_sublayer("se_block", SELayer(depth, 16))
+
+
+class BottleneckIRSE(BottleneckIR):
+    def __init__(self, in_channel, depth, stride):
+        super(BottleneckIRSE, self).__init__(in_channel, depth, stride)
+        self.res_layer.add_sublayer("se_block", SELayer(depth, 16))
+
+
+class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
+    '''A named tuple describing a ResNet block.'''
+
+
+def get_block(in_channel, depth, num_units, stride=2):
+
+    return [Bottleneck(in_channel, depth, stride)] +\
+        [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
+
+
+def get_blocks(num_layers):
+    if num_layers == 18:
+        blocks = [
+            get_block(
+                in_channel=64, depth=64, num_units=2), get_block(
+                    in_channel=64, depth=128, num_units=2), get_block(
+                        in_channel=128, depth=256, num_units=2), get_block(
+                            in_channel=256, depth=512, num_units=2)
+        ]
+    elif num_layers == 34:
+        blocks = [
+            get_block(
+                in_channel=64, depth=64, num_units=3), get_block(
+                    in_channel=64, depth=128, num_units=4), get_block(
+                        in_channel=128, depth=256, num_units=6), get_block(
+                            in_channel=256, depth=512, num_units=3)
+        ]
+    elif num_layers == 50:
+        blocks = [
+            get_block(
+                in_channel=64, depth=64, num_units=3), get_block(
+                    in_channel=64, depth=128, num_units=4), get_block(
+                        in_channel=128, depth=256, num_units=14), get_block(
+                            in_channel=256, depth=512, num_units=3)
+        ]
+    elif num_layers == 100:
+        blocks = [
+            get_block(
+                in_channel=64, depth=64, num_units=3), get_block(
+                    in_channel=64, depth=128, num_units=13), get_block(
+                        in_channel=128, depth=256, num_units=30), get_block(
+                            in_channel=256, depth=512, num_units=3)
+        ]
+    elif num_layers == 152:
+        blocks = [
+            get_block(
+                in_channel=64, depth=256, num_units=3), get_block(
+                    in_channel=256, depth=512, num_units=8), get_block(
+                        in_channel=512, depth=1024, num_units=36), get_block(
+                            in_channel=1024, depth=2048, num_units=3)
+        ]
+    elif num_layers == 200:
+        blocks = [
+            get_block(
+                in_channel=64, depth=256, num_units=3), get_block(
+                    in_channel=256, depth=512, num_units=24), get_block(
+                        in_channel=512, depth=1024, num_units=36), get_block(
+                            in_channel=1024, depth=2048, num_units=3)
+        ]
+
+    return blocks
+
+
+class Backbone(Layer):
+    def __init__(self, input_size, num_layers, mode='ir'):
+        """ Args:
+            input_size: input_size of backbone
+            num_layers: num_layers of backbone
+            mode: support 'ir' or 'ir_se'
+        """
+        super(Backbone, self).__init__()
+        assert input_size[0] in [112, 224], \
+            "input_size should be [112, 112] or [224, 224]"
+        assert num_layers in [18, 34, 50, 100, 152, 200], \
+            "num_layers should be 18, 34, 50, 100, 152 or 200"
+        assert mode in ['ir', 'ir_se'], \
+            "mode should be ir or ir_se"
+        weight_attr = paddle.ParamAttr(
+            regularizer=None, initializer=nn.initializer.Constant(value=1.0))
+        bias_attr = paddle.ParamAttr(
+            regularizer=None, initializer=nn.initializer.Constant(value=0.0))
+        self.input_layer = Sequential(
+            Conv2D(
+                3,
+                64, (3, 3),
+                1,
+                1,
+                weight_attr=nn.initializer.KaimingNormal(),
+                bias_attr=False),
+            BatchNorm2D(
+                64, weight_attr=weight_attr, bias_attr=bias_attr),
+            PReLU(64))
+        blocks = get_blocks(num_layers)
+        if num_layers <= 100:
+            if mode == 'ir':
+                unit_module = BasicBlockIR
+            elif mode == 'ir_se':
+                unit_module = BasicBlockIRSE
+            output_channel = 512
+        else:
+            if mode == 'ir':
+                unit_module = BottleneckIR
+            elif mode == 'ir_se':
+                unit_module = BottleneckIRSE
+            output_channel = 2048
+
+        if input_size[0] == 112:
+            self.output_layer = Sequential(
+                BatchNorm2D(
+                    output_channel,
+                    weight_attr=weight_attr,
+                    bias_attr=bias_attr),
+                Dropout(0.4),
+                Flatten(),
+                Linear(
+                    output_channel * 7 * 7,
+                    512,
+                    weight_attr=nn.initializer.KaimingNormal()),
+                BatchNorm1D(
+                    512, weight_attr=False, bias_attr=False))
+        else:
+            self.output_layer = Sequential(
+                BatchNorm2D(
+                    output_channel,
+                    weight_attr=weight_attr,
+                    bias_attr=bias_attr),
+                Dropout(0.4),
+                Flatten(),
+                Linear(
+                    output_channel * 14 * 14,
+                    512,
+                    weight_attr=nn.initializer.KaimingNormal()),
+                BatchNorm1D(
+                    512, weight_attr=False, bias_attr=False))
+
+        modules = []
+        for block in blocks:
+            for bottleneck in block:
+                modules.append(
+                    unit_module(bottleneck.in_channel, bottleneck.depth,
+                                bottleneck.stride))
+        self.body = Sequential(*modules)
+
+        # initialize_weights(self.modules())
+
+    def forward(self, x):
+
+        # the original AdaFace code only supports one extra image; it comes
+        # with an extra dimension for the number of extra images, which is
+        # simply squeezed out here
+        x = self.input_layer(x)
+
+        for idx, module in enumerate(self.body):
+            x = module(x)
+
+        x = self.output_layer(x)
+        # norm = paddle.norm(x, 2, 1, True)
+        # output = paddle.divide(x, norm)
+        # return output, norm
+        return x
+
+
+def AdaFace_IR_18(input_size=(112, 112)):
+    """ Constructs an ir-18 model.
+    """
+    model = Backbone(input_size, 18, 'ir')
+    return model
+
+
+def AdaFace_IR_34(input_size=(112, 112)):
+    """ Constructs an ir-34 model.
+    """
+    model = Backbone(input_size, 34, 'ir')
+
+    return model
+
+
+def AdaFace_IR_50(input_size=(112, 112)):
+    """ Constructs an ir-50 model.
+    """
+    model = Backbone(input_size, 50, 'ir')
+
+    return model
+
+
+def AdaFace_IR_101(input_size=(112, 112)):
+    """ Constructs an ir-101 model.
+    """
+    model = Backbone(input_size, 100, 'ir')
+
+    return model
+
+
+def AdaFace_IR_152(input_size=(112, 112)):
+    """ Constructs an ir-152 model.
+    """
+    model = Backbone(input_size, 152, 'ir')
+
+    return model
+
+
+def AdaFace_IR_200(input_size=(112, 112)):
+    """ Constructs an ir-200 model.
+    """
+    model = Backbone(input_size, 200, 'ir')
+
+    return model
+
+
+def AdaFace_IR_SE_50(input_size=(112, 112)):
+    """ Constructs an ir_se-50 model.
+    """
+    model = Backbone(input_size, 50, 'ir_se')
+
+    return model
+
+
+def AdaFace_IR_SE_101(input_size=(112, 112)):
+    """ Constructs an ir_se-101 model.
+    """
+    model = Backbone(input_size, 100, 'ir_se')
+
+    return model
+
+
+def AdaFace_IR_SE_152(input_size=(112, 112)):
+    """ Constructs an ir_se-152 model.
+    """
+    model = Backbone(input_size, 152, 'ir_se')
+
+    return model
+
+
+def AdaFace_IR_SE_200(input_size=(112, 112)):
+    """ Constructs an ir_se-200 model.
+    """
+    model = Backbone(input_size, 200, 'ir_se')
+
+    return model
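+
+
+# Editorial usage sketch (not part of the original file): the backbone maps
+# aligned 112x112 face crops to 512-d embeddings.
+#
+#     import paddle
+#     model = AdaFace_IR_50(input_size=(112, 112))
+#     faces = paddle.randn([4, 3, 112, 112])
+#     embeddings = model(faces)  # shape [4, 512]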
diff --git a/ppcls/arch/backbone/model_zoo/convnext.py b/ppcls/arch/backbone/model_zoo/convnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..f30894eab526b8deb5e61a964dc287415f1b1a02
--- /dev/null
+++ b/ppcls/arch/backbone/model_zoo/convnext.py
@@ -0,0 +1,240 @@
+# MIT License
+#
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+# Code was heavily based on https://github.com/facebookresearch/ConvNeXt
+
+import paddle
+import paddle.nn as nn
+from paddle.nn.initializer import TruncatedNormal, Constant
+
+from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
+
+MODEL_URLS = {
+    "ConvNeXt_tiny": "",  # TODO
+}
+
+__all__ = list(MODEL_URLS.keys())
+
+trunc_normal_ = TruncatedNormal(std=.02)
+zeros_ = Constant(value=0.)
+ones_ = Constant(value=1.)
+
+
+def drop_path(x, drop_prob=0., training=False):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+    The original name is misleading, as 'Drop Connect' is a different form of dropout from a
+    separate paper; see https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956.
+    """
+    if drop_prob == 0. or not training:
+        return x
+    keep_prob = paddle.to_tensor(1 - drop_prob)
+    shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
+    random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
+    random_tensor = paddle.floor(random_tensor)  # binarize
+    output = x.divide(keep_prob) * random_tensor
+    return output
+
+
+class DropPath(nn.Layer):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+    """
+
+    def __init__(self, drop_prob=None):
+        super(DropPath, self).__init__()
+        self.drop_prob = drop_prob
+
+    def forward(self, x):
+        return drop_path(x, self.drop_prob, self.training)
+
+
+class ChannelsFirstLayerNorm(nn.Layer):
+    r""" LayerNorm for channels_first inputs, i.e. tensors of shape
+    (batch_size, channels, height, width), normalized over the channel
+    dimension.
+    """
+
+    def __init__(self, normalized_shape, epsilon=1e-5):
+        super().__init__()
+        self.weight = self.create_parameter(
+            shape=[normalized_shape], default_initializer=ones_)
+        self.bias = self.create_parameter(
+            shape=[normalized_shape], default_initializer=zeros_)
+        self.epsilon = epsilon
+        self.normalized_shape = [normalized_shape]
+
+    def forward(self, x):
+        u = x.mean(1, keepdim=True)
+        s = (x - u).pow(2).mean(1, keepdim=True)
+        x = (x - u) / paddle.sqrt(s + self.epsilon)
+        x = self.weight[:, None, None] * x + self.bias[:, None, None]
+        return x
+
+
+class Block(nn.Layer):
+    r""" ConvNeXt Block. There are two equivalent implementations:
+    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
+    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
+    We use (2), which the ConvNeXt authors found slightly faster.
+
+    Args:
+        dim (int): Number of input channels.
+        drop_path (float): Stochastic depth rate. Default: 0.0
+        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+    """
+
+    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
+        super().__init__()
+        self.dwconv = nn.Conv2D(
+            dim, dim, 7, padding=3, groups=dim)  # depthwise conv
+        self.norm = nn.LayerNorm(dim, epsilon=1e-6)
+        # pointwise/1x1 convs, implemented with linear layers
+        self.pwconv1 = nn.Linear(dim, 4 * dim)
+        self.act = nn.GELU()
+        self.pwconv2 = nn.Linear(4 * dim, dim)
+        if layer_scale_init_value > 0:
+            self.gamma = self.create_parameter(
+                shape=[dim],
+                default_initializer=Constant(value=layer_scale_init_value))
+        else:
+            self.gamma = None
+        self.drop_path = DropPath(
+            drop_path) if drop_path > 0. else nn.Identity()
+
+    def forward(self, x):
+        input = x
+        x = self.dwconv(x)
+        x = x.transpose([0, 2, 3, 1])  # (N, C, H, W) -> (N, H, W, C)
+        x = self.norm(x)
+        x = self.pwconv1(x)
+        x = self.act(x)
+        x = self.pwconv2(x)
+        if self.gamma is not None:
+            x = self.gamma * x
+        x = x.transpose([0, 3, 1, 2])  # (N, H, W, C) -> (N, C, H, W)
+
+        x = input + self.drop_path(x)
+        return x
+
+
+class ConvNeXt(nn.Layer):
+    r""" ConvNeXt
+    A PaddlePaddle impl of : `A ConvNet for the 2020s` -
+    https://arxiv.org/pdf/2201.03545.pdf
+
+    Args:
+        in_chans (int): Number of input image channels. Default: 3
+        class_num (int): Number of classes for classification head. Default: 1000
+        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
+        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
+        drop_path_rate (float): Stochastic depth rate. Default: 0.
+        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
+ """ + + def __init__(self, + in_chans=3, + class_num=1000, + depths=[3, 3, 9, 3], + dims=[96, 192, 384, 768], + drop_path_rate=0., + layer_scale_init_value=1e-6, + head_init_scale=1.): + super().__init__() + + # stem and 3 intermediate downsampling conv layers + self.downsample_layers = nn.LayerList() + stem = nn.Sequential( + nn.Conv2D( + in_chans, dims[0], 4, stride=4), + ChannelsFirstLayerNorm( + dims[0], epsilon=1e-6)) + self.downsample_layers.append(stem) + for i in range(3): + downsample_layer = nn.Sequential( + ChannelsFirstLayerNorm( + dims[i], epsilon=1e-6), + nn.Conv2D( + dims[i], dims[i + 1], 2, stride=2), ) + self.downsample_layers.append(downsample_layer) + + # 4 feature resolution stages, each consisting of multiple residual blocks + self.stages = nn.LayerList() + dp_rates = [ + x.item() for x in paddle.linspace(0, drop_path_rate, sum(depths)) + ] + cur = 0 + for i in range(4): + stage = nn.Sequential(*[ + Block( + dim=dims[i], + drop_path=dp_rates[cur + j], + layer_scale_init_value=layer_scale_init_value) + for j in range(depths[i]) + ]) + self.stages.append(stage) + cur += depths[i] + + self.norm = nn.LayerNorm(dims[-1], epsilon=1e-6) # final norm layer + self.head = nn.Linear(dims[-1], class_num) + + self.apply(self._init_weights) + self.head.weight.set_value(self.head.weight * head_init_scale) + self.head.bias.set_value(self.head.bias * head_init_scale) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2D, nn.Linear)): + trunc_normal_(m.weight) + if m.bias is not None: + zeros_(m.bias) + + def forward_features(self, x): + for i in range(4): + x = self.downsample_layers[i](x) + x = self.stages[i](x) + # global average pooling, (N, C, H, W) -> (N, C) + return self.norm(x.mean([-2, -1])) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _load_pretrained(pretrained, model, model_url, use_ssld=False): + if pretrained is False: + pass + elif pretrained is True: + load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) + elif isinstance(pretrained, str): + load_dygraph_pretrain(model, pretrained) + else: + raise RuntimeError( + "pretrained type is not available. Please use `string` or `boolean` type." 
+ ) + + +def ConvNeXt_tiny(pretrained=False, use_ssld=False, **kwargs): + model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs) + _load_pretrained( + pretrained, model, MODEL_URLS["ConvNeXt_tiny"], use_ssld=use_ssld) + return model diff --git a/ppcls/arch/backbone/model_zoo/repvgg.py b/ppcls/arch/backbone/model_zoo/repvgg.py index 8ff662a7f88086abeee6b7f6e0260d2d3b3cd0c1..12f65549fad60adae6a412d8adb05f9846922c81 100644 --- a/ppcls/arch/backbone/model_zoo/repvgg.py +++ b/ppcls/arch/backbone/model_zoo/repvgg.py @@ -124,13 +124,7 @@ class RepVGGBlock(nn.Layer): groups=groups) def forward(self, inputs): - if not self.training and not self.is_repped: - self.rep() - self.is_repped = True - if self.training and self.is_repped: - self.is_repped = False - - if not self.training: + if self.is_repped: return self.nonlinearity(self.rbr_reparam(inputs)) if self.rbr_identity is None: @@ -154,6 +148,7 @@ class RepVGGBlock(nn.Layer): kernel, bias = self.get_equivalent_kernel_bias() self.rbr_reparam.weight.set_value(kernel) self.rbr_reparam.bias.set_value(bias) + self.is_repped = True def get_equivalent_kernel_bias(self): kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) diff --git a/ppcls/arch/backbone/model_zoo/res2net_vd.py b/ppcls/arch/backbone/model_zoo/res2net_vd.py index 511fbaa59e6ff5b4e5419edc084631f6e43873fa..2139e198819c6768b975b339e9373fe7f6334f10 100644 --- a/ppcls/arch/backbone/model_zoo/res2net_vd.py +++ b/ppcls/arch/backbone/model_zoo/res2net_vd.py @@ -165,7 +165,8 @@ class BottleneckBlock(nn.Layer): class Res2Net_vd(nn.Layer): - def __init__(self, layers=50, scales=4, width=26, class_num=1000): + def __init__(self, layers=50, scales=4, width=26, class_num=1000, + **kwargs): super(Res2Net_vd, self).__init__() self.layers = layers diff --git a/ppcls/arch/backbone/model_zoo/shufflenet_v2.py b/ppcls/arch/backbone/model_zoo/shufflenet_v2.py index b10249b7e2ea59bfa846c4fa3e09c5fbfe77b9ef..c769afdd4b238fa0a7b92fdb72c3962645a2ac8f 100644 --- a/ppcls/arch/backbone/model_zoo/shufflenet_v2.py +++ b/ppcls/arch/backbone/model_zoo/shufflenet_v2.py @@ -233,7 +233,7 @@ class ShuffleNet(Layer): elif scale == 1.5: stage_out_channels = [-1, 24, 176, 352, 704, 1024] elif scale == 2.0: - stage_out_channels = [-1, 24, 224, 488, 976, 2048] + stage_out_channels = [-1, 24, 244, 488, 976, 2048] else: raise NotImplementedError("This scale size:[" + str(scale) + "] is not implemented!") diff --git a/ppcls/arch/backbone/model_zoo/swin_transformer.py b/ppcls/arch/backbone/model_zoo/swin_transformer.py deleted file mode 100644 index 877b7365998bce81489a89ab57a240deb66d45cc..0000000000000000000000000000000000000000 --- a/ppcls/arch/backbone/model_zoo/swin_transformer.py +++ /dev/null @@ -1,858 +0,0 @@ -# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Code was based on https://github.com/microsoft/Swin-Transformer -# reference: https://arxiv.org/abs/2103.14030 - -import numpy as np -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -from paddle.nn.initializer import TruncatedNormal, Constant - -from .vision_transformer import trunc_normal_, zeros_, ones_, to_2tuple, DropPath, Identity - -from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url - -MODEL_URLS = { - "SwinTransformer_tiny_patch4_window7_224": - "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams", - "SwinTransformer_small_patch4_window7_224": - "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_small_patch4_window7_224_pretrained.pdparams", - "SwinTransformer_base_patch4_window7_224": - "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams", - "SwinTransformer_base_patch4_window12_384": - "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window12_384_pretrained.pdparams", - "SwinTransformer_large_patch4_window7_224": - "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window7_224_22kto1k_pretrained.pdparams", - "SwinTransformer_large_patch4_window12_384": - "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams", -} - -__all__ = list(MODEL_URLS.keys()) - - -class Mlp(nn.Layer): - def __init__(self, - in_features, - hidden_features=None, - out_features=None, - act_layer=nn.GELU, - drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.reshape( - [B, H // window_size, window_size, W // window_size, window_size, C]) - windows = x.transpose([0, 1, 3, 2, 4, 5]).reshape( - [-1, window_size, window_size, C]) - return windows - - -def window_reverse(windows, window_size, H, W, C): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - - Returns: - x: (B, H, W, C) - """ - x = windows.reshape( - [-1, H // window_size, W // window_size, window_size, window_size, C]) - x = x.transpose([0, 1, 3, 2, 4, 5]).reshape([-1, H, W, C]) - return x - - -class WindowAttention(nn.Layer): - r""" Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. 
Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. Default: 0.0 - """ - - def __init__(self, - dim, - window_size, - num_heads, - qkv_bias=True, - qk_scale=None, - attn_drop=0., - proj_drop=0.): - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - - # define a parameter table of relative position bias - # 2*Wh-1 * 2*Ww-1, nH - self.relative_position_bias_table = self.create_parameter( - shape=((2 * window_size[0] - 1) * (2 * window_size[1] - 1), - num_heads), - default_initializer=zeros_) - self.add_parameter("relative_position_bias_table", - self.relative_position_bias_table) - - # get pair-wise relative position index for each token inside the window - coords_h = paddle.arange(self.window_size[0]) - coords_w = paddle.arange(self.window_size[1]) - coords = paddle.stack(paddle.meshgrid( - [coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = paddle.flatten(coords, 1) # 2, Wh*Ww - - coords_flatten_1 = coords_flatten.unsqueeze(axis=2) - coords_flatten_2 = coords_flatten.unsqueeze(axis=1) - relative_coords = coords_flatten_1 - coords_flatten_2 - - relative_coords = relative_coords.transpose( - [1, 2, 0]) # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[ - 0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", - relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table) - self.softmax = nn.Softmax(axis=-1) - - def forward(self, x, mask=None): - """ - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = self.qkv(x).reshape( - [B_, N, 3, self.num_heads, C // self.num_heads]).transpose( - [2, 0, 3, 1, 4]) - q, k, v = qkv[0], qkv[1], qkv[2] - - q = q * self.scale - attn = paddle.mm(q, k.transpose([0, 1, 3, 2])) - - index = self.relative_position_index.reshape([-1]) - - relative_position_bias = paddle.index_select( - self.relative_position_bias_table, index) - relative_position_bias = relative_position_bias.reshape([ - self.window_size[0] * self.window_size[1], - self.window_size[0] * self.window_size[1], -1 - ]) # Wh*Ww,Wh*Ww,nH - - relative_position_bias = relative_position_bias.transpose( - [2, 0, 1]) # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N - ]) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.reshape([-1, self.num_heads, N, N]) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - # x = (attn @ v).transpose(1, 2).reshape([B_, N, C]) - x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([B_, N, C]) - x = self.proj(x) - x = self.proj_drop(x) - return x - - def extra_repr(self): - return "dim={}, window_size={}, num_heads={}".format( - self.dim, self.window_size, self.num_heads) - - def flops(self, N): - # calculate flops for 1 window with token length of N - flops = 0 - # qkv = self.qkv(x) - flops += N * self.dim * 3 * self.dim - # attn = (q @ 
k.transpose(-2, -1)) - flops += self.num_heads * N * (self.dim // self.num_heads) * N - # x = (attn @ v) - flops += self.num_heads * N * N * (self.dim // self.num_heads) - # x = self.proj(x) - flops += N * self.dim * self.dim - return flops - - -class SwinTransformerBlock(nn.Layer): - r""" Swin Transformer Block. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resulotion. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Layer, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, - dim, - input_resolution, - num_heads, - window_size=7, - shift_size=0, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop=0., - attn_drop=0., - drop_path=0., - act_layer=nn.GELU, - norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - if min(self.input_resolution) <= self.window_size: - # if window size is larger than input resolution, we don't partition windows - self.shift_size = 0 - self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, - window_size=to_2tuple(self.window_size), - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, - hidden_features=mlp_hidden_dim, - act_layer=act_layer, - drop=drop) - - if self.shift_size > 0: - # calculate attention mask for SW-MSA - H, W = self.input_resolution - img_mask = paddle.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition( - img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.reshape( - [-1, self.window_size * self.window_size]) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - - huns = -100.0 * paddle.ones_like(attn_mask) - attn_mask = huns * (attn_mask != 0).astype("float32") - else: - attn_mask = None - - self.register_buffer("attn_mask", attn_mask) - - def forward(self, x): - H, W = self.input_resolution - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.reshape([B, H, W, C]) - - # cyclic shift - if self.shift_size > 0: - shifted_x = paddle.roll( - x, shifts=(-self.shift_size, -self.shift_size), axis=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition( - shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.reshape( - [-1, self.window_size * self.window_size, - C]) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn( - x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.reshape( - [-1, self.window_size, self.window_size, C]) - shifted_x = window_reverse(attn_windows, self.window_size, H, W, - C) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = paddle.roll( - shifted_x, - shifts=(self.shift_size, self.shift_size), - axis=(1, 2)) - else: - x = shifted_x - x = x.reshape([B, H * W, C]) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - def extra_repr(self): - return "dim={}, input_resolution={}, num_heads={}, window_size={}, shift_size={}, mlp_ratio={}".format( - self.dim, self.input_resolution, self.num_heads, self.window_size, - self.shift_size, self.mlp_ratio) - - def flops(self): - flops = 0 - H, W = self.input_resolution - # norm1 - flops += self.dim * H * W - # W-MSA/SW-MSA - nW = H * W / self.window_size / self.window_size - flops += nW * self.attn.flops(self.window_size * self.window_size) - # mlp - flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio - # norm2 - flops += self.dim * H * W - return flops - - -class PatchMerging(nn.Layer): - r""" Patch Merging Layer. - - Args: - input_resolution (tuple[int]): Resolution of input feature. - dim (int): Number of input channels. - norm_layer (nn.Layer, optional): Normalization layer. 
Default: nn.LayerNorm - """ - - def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.input_resolution = input_resolution - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias_attr=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x): - """ - x: B, H*W, C - """ - H, W = self.input_resolution - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - assert H % 2 == 0 and W % 2 == 0, "x size ({}*{}) are not even.".format( - H, W) - - x = x.reshape([B, H, W, C]) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = paddle.concat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.reshape([B, H * W // 4, 4 * C]) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - def extra_repr(self): - return "input_resolution={}, dim={}".format(self.input_resolution, - self.dim) - - def flops(self): - H, W = self.input_resolution - flops = H * W * self.dim - flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim - return flops - - -class BasicLayer(nn.Layer): - """ A basic Swin Transformer layer for one stage. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Layer, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Layer | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
- """ - - def __init__(self, - dim, - input_resolution, - depth, - num_heads, - window_size, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop=0., - attn_drop=0., - drop_path=0., - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False): - - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.LayerList([ - SwinTransformerBlock( - dim=dim, - input_resolution=input_resolution, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] - if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer) for i in range(depth) - ]) - - # patch merging layer - if downsample is not None: - self.downsample = downsample( - input_resolution, dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x): - for blk in self.blocks: - x = blk(x) - if self.downsample is not None: - x = self.downsample(x) - return x - - def extra_repr(self): - return "dim={}, input_resolution={}, depth={}".format( - self.dim, self.input_resolution, self.depth) - - def flops(self): - flops = 0 - for blk in self.blocks: - flops += blk.flops() - if self.downsample is not None: - flops += self.downsample.flops() - return flops - - -class PatchEmbed(nn.Layer): - """ Image to Patch Embedding - - Args: - img_size (int): Image size. Default: 224. - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Layer, optional): Normalization layer. Default: None - """ - - def __init__(self, - img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - norm_layer=None): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [ - img_size[0] // patch_size[0], img_size[1] // patch_size[1] - ] - self.img_size = img_size - self.patch_size = patch_size - self.patches_resolution = patches_resolution - self.num_patches = patches_resolution[0] * patches_resolution[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2D( - in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - B, C, H, W = x.shape - # TODO (littletomatodonkey), uncomment the line will cause failure of jit.save - # assert [H, W] == self.img_size[:2], "Input image size ({H}*{W}) doesn't match model ({}*{}).".format(H, W, self.img_size[0], self.img_size[1]) - x = self.proj(x) - - x = x.flatten(2).transpose([0, 2, 1]) # B Ph*Pw C - if self.norm is not None: - x = self.norm(x) - return x - - def flops(self): - Ho, Wo = self.patches_resolution - flops = Ho * Wo * self.embed_dim * self.in_chans * ( - self.patch_size[0] * self.patch_size[1]) - if self.norm is not None: - flops += Ho * Wo * self.embed_dim - return flops - - -class SwinTransformer(nn.Layer): - """ Swin Transformer - A PaddlePaddle impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/pdf/2103.14030 - - Args: - img_size (int | tuple(int)): Input image size. Default 224 - patch_size (int | tuple(int)): Patch size. 
Default: 4 - in_chans (int): Number of input image channels. Default: 3 - num_classes (int): Number of classes for classification head. Default: 1000 - embed_dim (int): Patch embedding dimension. Default: 96 - depths (tuple(int)): Depth of each Swin Transformer layer. - num_heads (tuple(int)): Number of attention heads in different layers. - window_size (int): Window size. Default: 7 - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None - drop_rate (float): Dropout rate. Default: 0 - attn_drop_rate (float): Attention dropout rate. Default: 0 - drop_path_rate (float): Stochastic depth rate. Default: 0.1 - norm_layer (nn.Layer): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False - patch_norm (bool): If True, add normalization after patch embedding. Default: True - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False - """ - - def __init__(self, - img_size=224, - patch_size=4, - in_chans=3, - class_num=1000, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - use_checkpoint=False, - **kwargs): - super(SwinTransformer, self).__init__() - - self.num_classes = num_classes = class_num - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.num_features = int(embed_dim * 2**(self.num_layers - 1)) - self.mlp_ratio = mlp_ratio - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - img_size=img_size, - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - num_patches = self.patch_embed.num_patches - patches_resolution = self.patch_embed.patches_resolution - self.patches_resolution = patches_resolution - - # absolute position embedding - if self.ape: - self.absolute_pos_embed = self.create_parameter( - shape=(1, num_patches, embed_dim), default_initializer=zeros_) - self.add_parameter("absolute_pos_embed", self.absolute_pos_embed) - trunc_normal_(self.absolute_pos_embed) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = np.linspace(0, drop_path_rate, - sum(depths)).tolist() # stochastic depth decay rule - - # build layers - self.layers = nn.LayerList() - for i_layer in range(self.num_layers): - layer = BasicLayer( - dim=int(embed_dim * 2**i_layer), - input_resolution=(patches_resolution[0] // (2**i_layer), - patches_resolution[1] // (2**i_layer)), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], - norm_layer=norm_layer, - downsample=PatchMerging - if (i_layer < self.num_layers - 1) else None, - use_checkpoint=use_checkpoint) - self.layers.append(layer) - - self.norm = norm_layer(self.num_features) - self.avgpool = nn.AdaptiveAvgPool1D(1) - self.head = nn.Linear( - self.num_features, - num_classes) if self.num_classes > 0 else nn.Identity() - - self.apply(self._init_weights) - - def _init_weights(self, m): - 
if isinstance(m, nn.Linear): - trunc_normal_(m.weight) - if isinstance(m, nn.Linear) and m.bias is not None: - zeros_(m.bias) - elif isinstance(m, nn.LayerNorm): - zeros_(m.bias) - ones_(m.weight) - - def forward_features(self, x): - x = self.patch_embed(x) - if self.ape: - x = x + self.absolute_pos_embed - x = self.pos_drop(x) - - for layer in self.layers: - x = layer(x) - - x = self.norm(x) # B L C - x = self.avgpool(x.transpose([0, 2, 1])) # B C 1 - x = paddle.flatten(x, 1) - return x - - def forward(self, x): - x = self.forward_features(x) - x = self.head(x) - return x - - def flops(self): - flops = 0 - flops += self.patch_embed.flops() - for _, layer in enumerate(self.layers): - flops += layer.flops() - flops += self.num_features * self.patches_resolution[ - 0] * self.patches_resolution[1] // (2**self.num_layers) - flops += self.num_features * self.num_classes - return flops - - -def _load_pretrained(pretrained, model, model_url, use_ssld=False): - if pretrained is False: - pass - elif pretrained is True: - load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld) - elif isinstance(pretrained, str): - load_dygraph_pretrain(model, pretrained) - else: - raise RuntimeError( - "pretrained type is not available. Please use `string` or `boolean` type." - ) - - -def SwinTransformer_tiny_patch4_window7_224(pretrained=False, - use_ssld=False, - **kwargs): - model = SwinTransformer( - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - drop_path_rate=0.2, - **kwargs) - _load_pretrained( - pretrained, - model, - MODEL_URLS["SwinTransformer_tiny_patch4_window7_224"], - use_ssld=use_ssld) - return model - - -def SwinTransformer_small_patch4_window7_224(pretrained=False, - use_ssld=False, - **kwargs): - model = SwinTransformer( - embed_dim=96, - depths=[2, 2, 18, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - **kwargs) - _load_pretrained( - pretrained, - model, - MODEL_URLS["SwinTransformer_small_patch4_window7_224"], - use_ssld=use_ssld) - return model - - -def SwinTransformer_base_patch4_window7_224(pretrained=False, - use_ssld=False, - **kwargs): - model = SwinTransformer( - embed_dim=128, - depths=[2, 2, 18, 2], - num_heads=[4, 8, 16, 32], - window_size=7, - drop_path_rate=0.5, - **kwargs) - _load_pretrained( - pretrained, - model, - MODEL_URLS["SwinTransformer_base_patch4_window7_224"], - use_ssld=use_ssld) - return model - - -def SwinTransformer_base_patch4_window12_384(pretrained=False, - use_ssld=False, - **kwargs): - model = SwinTransformer( - img_size=384, - embed_dim=128, - depths=[2, 2, 18, 2], - num_heads=[4, 8, 16, 32], - window_size=12, - drop_path_rate=0.5, # NOTE: do not appear in offical code - **kwargs) - _load_pretrained( - pretrained, - model, - MODEL_URLS["SwinTransformer_base_patch4_window12_384"], - use_ssld=use_ssld) - return model - - -def SwinTransformer_large_patch4_window7_224(pretrained=False, - use_ssld=False, - **kwargs): - model = SwinTransformer( - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - window_size=7, - **kwargs) - _load_pretrained( - pretrained, - model, - MODEL_URLS["SwinTransformer_large_patch4_window7_224"], - use_ssld=use_ssld) - return model - - -def SwinTransformer_large_patch4_window12_384(pretrained=False, - use_ssld=False, - **kwargs): - model = SwinTransformer( - img_size=384, - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - window_size=12, - **kwargs) - _load_pretrained( - pretrained, - model, - MODEL_URLS["SwinTransformer_large_patch4_window12_384"], - 
use_ssld=use_ssld) - return model diff --git a/ppcls/arch/backbone/model_zoo/vision_transformer.py b/ppcls/arch/backbone/model_zoo/vision_transformer.py index d3f149d232d644825d4ed2f8b51a47ad9f80335f..35796e5e9610587d794428dc8284cab5bae3d554 100644 --- a/ppcls/arch/backbone/model_zoo/vision_transformer.py +++ b/ppcls/arch/backbone/model_zoo/vision_transformer.py @@ -62,7 +62,7 @@ def drop_path(x, drop_prob=0., training=False): return x keep_prob = paddle.to_tensor(1 - drop_prob) shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1) - random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + random_tensor = keep_prob + paddle.rand(shape).astype(x.dtype) random_tensor = paddle.floor(random_tensor) # binarize output = x.divide(keep_prob) * random_tensor return output diff --git a/ppcls/arch/gears/__init__.py b/ppcls/arch/gears/__init__.py index 8757aa4aeb4a510857ca4dc1c60696b1d6e86a0b..871967804e21c362935915942aa3f621207b934e 100644 --- a/ppcls/arch/gears/__init__.py +++ b/ppcls/arch/gears/__init__.py @@ -19,6 +19,7 @@ from .fc import FC from .vehicle_neck import VehicleNeck from paddle.nn import Tanh from .bnneck import BNNeck +from .adamargin import AdaMargin __all__ = ['build_gear'] @@ -26,7 +27,7 @@ __all__ = ['build_gear'] def build_gear(config): support_dict = [ 'ArcMargin', 'CosMargin', 'CircleMargin', 'FC', 'VehicleNeck', 'Tanh', - 'BNNeck' + 'BNNeck', 'AdaMargin' ] module_name = config.pop('name') assert module_name in support_dict, Exception( diff --git a/ppcls/arch/gears/adamargin.py b/ppcls/arch/gears/adamargin.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0f5f245dbbe2c282f726b7d5be3634d6df912c --- /dev/null +++ b/ppcls/arch/gears/adamargin.py @@ -0,0 +1,111 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
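+#
+# AdaFace makes the recognition margin a function of the feature norm, which
+# serves as a proxy for image quality: each sample receives a blend of an
+# ArcFace-style angular margin and a CosFace-style additive margin, driven by
+# the norm-based margin_scaler computed in AdaMargin.forward() below.
+#
+# Minimal usage sketch (shapes chosen for illustration only; in PaddleClas
+# this head is normally constructed from the Gears config via build_gear):
+#
+#     head = AdaMargin(embedding_size=512, class_num=1000)
+#     emb = paddle.randn([8, 512])          # unnormalized embeddings
+#     label = paddle.randint(0, 1000, [8])  # ground-truth class ids
+#     logits = head(emb, label)             # [8, 1000], margin-adjusted, scaled by s=64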
+
+# This code is based on AdaFace (https://github.com/mk-minchul/AdaFace)
+# Paper: AdaFace: Quality Adaptive Margin for Face Recognition
+from paddle.nn import Layer
+import math
+import paddle
+
+
+def l2_norm(input, axis=1):
+    norm = paddle.norm(input, 2, axis, True)
+    output = paddle.divide(input, norm)
+    return output
+
+
+class AdaMargin(Layer):
+    def __init__(
+            self,
+            embedding_size=512,
+            class_num=70722,
+            m=0.4,
+            h=0.333,
+            s=64.,
+            t_alpha=1.0, ):
+        super(AdaMargin, self).__init__()
+        self.classnum = class_num
+        # initialize the class-center kernel with unit-norm columns
+        kernel_weight = paddle.uniform(
+            [embedding_size, class_num], min=-1, max=1)
+        kernel_weight_norm = paddle.norm(
+            kernel_weight, p=2, axis=0, keepdim=True)
+        kernel_weight_norm = paddle.where(kernel_weight_norm > 1e-5,
+                                          kernel_weight_norm,
+                                          paddle.ones_like(kernel_weight_norm))
+        kernel_weight = kernel_weight / kernel_weight_norm
+        self.kernel = self.create_parameter(
+            [embedding_size, class_num],
+            attr=paddle.nn.initializer.Assign(kernel_weight))
+
+        # initial kernel
+        # self.kernel.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
+        self.m = m
+        self.eps = 1e-3
+        self.h = h
+        self.s = s
+
+        # EMA buffers for the running feature-norm statistics
+        self.t_alpha = t_alpha
+        self.register_buffer('t', paddle.zeros([1]), persistable=True)
+        self.register_buffer(
+            'batch_mean', paddle.ones([1]) * 20, persistable=True)
+        self.register_buffer(
+            'batch_std', paddle.ones([1]) * 100, persistable=True)
+
+    def forward(self, embeddings, label):
+
+        norms = paddle.norm(embeddings, 2, 1, True)
+        embeddings = paddle.divide(embeddings, norms)
+        kernel_norm = l2_norm(self.kernel, axis=0)
+        cosine = paddle.mm(embeddings, kernel_norm)
+        cosine = paddle.clip(cosine, -1 + self.eps,
+                             1 - self.eps)  # for stability
+
+        safe_norms = paddle.clip(norms, min=0.001, max=100)  # for stability
+        safe_norms = safe_norms.clone().detach()
+
+        # EMA update of the batch feature-norm mean/std
+        with paddle.no_grad():
+            mean = safe_norms.mean().detach()
+            std = safe_norms.std().detach()
+            self.batch_mean = mean * self.t_alpha + (1 - self.t_alpha
+                                                     ) * self.batch_mean
+            self.batch_std = std * self.t_alpha + (1 - self.t_alpha
+                                                   ) * self.batch_std
+
+        margin_scaler = (safe_norms - self.batch_mean) / (
+            self.batch_std + self.eps)  # ~68% fall in [-1, 1]
+        margin_scaler = margin_scaler * self.h  # ~68% in [-0.333, 0.333] when h=0.333
+        margin_scaler = paddle.clip(margin_scaler, -1, 1)
+
+        # angular margin: g_angle = -m * margin_scaler at the ground-truth class
+        m_arc = paddle.nn.functional.one_hot(
+            label.reshape([-1]), self.classnum)
+        g_angular = self.m * margin_scaler * -1
+        m_arc = m_arc * g_angular
+        theta = paddle.acos(cosine)
+        theta_m = paddle.clip(
+            theta + m_arc, min=self.eps, max=math.pi - self.eps)
+        cosine = paddle.cos(theta_m)
+
+        # additive margin: g_add = m + m * margin_scaler at the ground-truth class
+        m_cos = paddle.nn.functional.one_hot(
+            label.reshape([-1]), self.classnum)
+        g_add = self.m + (self.m * margin_scaler)
+        m_cos = m_cos * g_add
+        cosine = cosine - m_cos
+
+        # scale the margin-adjusted cosine logits
+        scaled_cosine_m = cosine * self.s
+        return scaled_cosine_m
diff --git a/ppcls/arch/slim/quant.py b/ppcls/arch/slim/quant.py
index b8f59a78fdd9a8f1f3e613f5ee44d4fa68266e30..3e7b4a242be22d8bca57a36ef3183201068cb0b6 100644
--- a/ppcls/arch/slim/quant.py
+++ b/ppcls/arch/slim/quant.py
@@ -40,12 +40,20 @@ QUANT_CONFIG = {
 }
 
 
-def quantize_model(config, model):
+def quantize_model(config, model, mode="train"):
     if config.get("Slim", False) and config["Slim"].get("quant", False):
         from paddleslim.dygraph.quant import QAT
         assert config["Slim"]["quant"]["name"].lower(
         ) == 'pact', 'Only PACT quantization method is supported now'
         QUANT_CONFIG["activation_preprocess_type"] = "PACT"
+        if mode in
["infer", "export"]: + QUANT_CONFIG['activation_preprocess_type'] = None + + # for rep nets, convert to reparameterized model first + for layer in model.sublayers(): + if hasattr(layer, "rep"): + layer.rep() + model.quanter = QAT(config=QUANT_CONFIG) model.quanter.quantize(model) logger.info("QAT model summary:") diff --git a/ppcls/configs/Attr/StrongBaselineAttr.yaml b/ppcls/configs/Attr/StrongBaselineAttr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2324015d667a09a56570677713792b16f1b2ed03 --- /dev/null +++ b/ppcls/configs/Attr/StrongBaselineAttr.yaml @@ -0,0 +1,113 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "ResNet50" + pretrained: True + class_num: 26 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Adam + lr: + name: Piecewise + decay_epochs: [12, 18, 24, 28] + values: [0.0001, 0.00001, 0.000001, 0.0000001] + regularizer: + name: 'L2' + coeff: 0.0005 + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/attribute/data/" + cls_label_path: "dataset/attribute/trainval.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/attribute/data/" + cls_label_path: "dataset/attribute/test.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: diff --git a/ppcls/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml b/ppcls/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb6e3cbdbb2dc648e4ef0bd1cad59106efbf91db --- /dev/null +++ b/ppcls/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + update_freq: 4 # for 8 cards + +# model ema +EMA: + decay: 0.9999 + + +# model 
architecture +Arch: + name: ConvNeXt_tiny + class_num: 1000 + drop_path_rate: 0.1 + layer_scale_init_value: 1e-6 + head_init_scale: 1.0 + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 4e-3 # lr 4e-3 for total_batch_size 4096 + eta_min: 1e-6 + warmup_epoch: 20 + warmup_start_lr: 0 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + + +Metric: + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml b/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d906c22de20914eeb72d2162f9bfa2142b357dcf --- /dev/null +++ b/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_dml.yaml @@ -0,0 +1,158 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_lcnet_x2_5_dml + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + 
pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + infer_model_name: "Student" + models: + - Teacher: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + - Student: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml b/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml new file mode 100644 index 0000000000000000000000000000000000000000..656a0e907716e0d7d7df8ec6ab2923f584fc368c --- /dev/null +++ b/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_ssld.yaml @@ -0,0 +1,157 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_r50_vd_distill + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + to_static: True + +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + infer_model_name: "Student" + models: + - Teacher: + name: 
ResNet50_vd + class_num: *class_num + pretrained: True + use_ssld: True + - Student: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.2 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml b/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b425b592fdabea61832a1ad3c2f50a20b62ecd6f --- /dev/null +++ b/ppcls/configs/ImageNet/Distillation/PPLCNet_x2_5_udml.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output_lcnet_x2_5_udml + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + infer_model_name: "Student" + models: + - Teacher: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"] + - Student: + name: PPLCNet_x2_5 + class_num: *class_num + pretrained: False + return_patterns: ["blocks3", "blocks4", 
"blocks5", "blocks6"] + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + key: logits + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + key: logits + model_name_pairs: + - ["Student", "Teacher"] + - DistillationDistanceLoss: + weight: 1.0 + key: "blocks5" + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + diff --git a/ppcls/configs/ImageNet/Distillation/res2net200_vd_distill_pphgnet_base.yaml b/ppcls/configs/ImageNet/Distillation/res2net200_vd_distill_pphgnet_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7af9680cb2ec90c4c70cdf66d1c7f5a6225a9456 --- /dev/null +++ b/ppcls/configs/ImageNet/Distillation/res2net200_vd_distill_pphgnet_base.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 360 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_dali: false + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: Res2Net200_vd_26w_4s + class_num: *class_num + pretrained: True + use_ssld: True + - Student: + name: 
PPHGNet_base + class_num: *class_num + pretrained: False + + infer_model_name: "Student" + + +# loss function config for traing/eval process +Loss: + Train: + - DistillationCELoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/train_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: "./dataset/ILSVRC2012/" + cls_label_path: "./dataset/ILSVRC2012/val_list.txt" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 0.00392157 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: "docs/images/inference_deployment/whl_demo.jpg" + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: DistillationPostProcess + func: Topk + topk: 5 + class_id_map_file: "ppcls/utils/imagenet1k_label_list.txt" + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml b/ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9bae5a3c1fde48afd103a6ccb302d723ea4a4c99 --- /dev/null +++ b/ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dist.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/r34_r18_dist + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 100 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + to_static: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 1000 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + infer_model_name: "Student" + models: + - Teacher: + name: ResNet34 + class_num: *class_num + pretrained: True + - Student: + name: ResNet18 + class_num: *class_num + pretrained: False + 
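+# The ResNet34 teacher above is frozen (freeze_params_list: True) and starts
+# from pretrained weights, so only the ResNet18 student is updated, supervised
+# by the ground-truth CE loss plus the DIST distillation loss configured below.
+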
+# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + model_names: ["Student"] + - DistillationDISTLoss: + weight: 2.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + weight_decay: 1e-4 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + diff --git a/ppcls/configs/ImageNet/PPHGNet/PPHGNet_base.yaml b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e07692b01715ffa8196e5ded4604f9294d1ed07 --- /dev/null +++ b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_base.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNet_base + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m15-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.4 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.4 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eabccd4b712ab48886c74caf6b784b4c193f6913 --- /dev/null +++ b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNet_small + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 
1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 236 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e423c866b131aefda13b0186eca7ac27d3c84733 --- /dev/null +++ b/ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 600 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: PPHGNet_tiny + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + 
CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 232 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml b/ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..640833938bd81d8dd24c8bdd0ae1de86d8697a10 --- /dev/null +++ b/ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml @@ -0,0 +1,133 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 480 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNetV2_base + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + # support to specify width and height respectively: + # scales: [(160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 500 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 
0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml b/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml index 6a4425b4048ce5c2881ca5bc55e4902b5f50396b..01ba0169af8eaa58a3bf53b60be6249cb04bb737 100644 --- a/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml +++ b/ppcls/configs/ImageNet/ResNet/ResNet50_amp_O2_ultra.yaml @@ -105,7 +105,6 @@ DataLoader: mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] order: '' - output_fp16: True channel_num: *image_channel sampler: name: DistributedBatchSampler @@ -132,7 +131,6 @@ Infer: mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] order: '' - output_fp16: True channel_num: *image_channel - ToCHWImage: PostProcess: diff --git a/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml b/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml index af987ed7f59ff9c9576d4fb417c48e112afa3986..72857c2cea5500cf3e728cc2edddf69343cc4814 100644 --- a/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml +++ b/ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d_amp_O2_ultra.yaml @@ -15,6 +15,13 @@ Global: image_shape: [*image_channel, 224, 224] save_inference_dir: ./inference +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O2: pure fp16 + level: O2 + # model architecture Arch: name: SE_ResNeXt101_32x4d @@ -32,13 +39,6 @@ Loss: - CELoss: weight: 1.0 -# mixed precision training -AMP: - scale_loss: 128.0 - use_dynamic_loss_scaling: True - # O2: pure fp16 - level: O2 - Optimizer: name: Momentum momentum: 0.9 @@ -99,10 +99,9 @@ DataLoader: mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] order: '' - output_fp16: True channel_num: *image_channel sampler: - name: BatchSampler + name: DistributedBatchSampler batch_size: 64 drop_last: False shuffle: False @@ -126,7 +125,6 @@ Infer: mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] order: '' - output_fp16: True channel_num: *image_channel - ToCHWImage: PostProcess: diff --git a/ppcls/configs/PULC/car_exists/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/car_exists/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..911b8edec269b593fc500416a61fe044fb56ab0d --- /dev/null +++ b/ppcls/configs/PULC/car_exists/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,139 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: 
MobileNetV3_small_x0_35 + class_num: 2 + pretrained: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..247f655b56946a9e81bf0b25fd827bdbde059735 --- /dev/null +++ b/ppcls/configs/PULC/car_exists/PPLCNet_x1_0.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.5 + 
config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.9 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4c11802d6f1a5c1a5d0395bd7d32ce4f08ab26bc --- /dev/null +++ b/ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic 
+ img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c263f2309feb333b339f5e288b9b2ba30ac30c44 --- /dev/null +++ b/ppcls/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + 
image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: no_car + label_1: contains_car + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + max_fpr: 0.01 + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/car_exists/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/car_exists/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a75fda4b48ff613dc681d9dd20bcc5d753de6c74 --- /dev/null +++ b/ppcls/configs/PULC/car_exists/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,169 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-4 + eta_min: 2e-6 + warmup_epoch: 5 + warmup_start_lr: 2e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/car_exists/ + cls_label_path: ./dataset/car_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: 
ImageNetDataset
+      image_root: ./dataset/car_exists/
+      cls_label_path: ./dataset/car_exists/val_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            resize_short: 256
+        - CropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 64
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+Infer:
+  infer_imgs: deploy/images/PULC/car_exists/objects365_00001507.jpeg
+  batch_size: 10
+  transforms:
+    - DecodeImage:
+        to_rgb: True
+        channel_first: False
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 1.0/255.0
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ''
+    - ToCHWImage:
+  PostProcess:
+    name: ThreshOutput
+    threshold: 0.5
+    label_0: no_car
+    label_1: contains_car
+
+Metric:
+  Train:
+    - TopkAcc:
+        topk: [1, 2]
+  Eval:
+    - TprAtFpr:
+        max_fpr: 0.01
+    - TopkAcc:
+        topk: [1, 2]
diff --git a/ppcls/configs/PULC/car_exists/search.yaml b/ppcls/configs/PULC/car_exists/search.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..820337c027248501a564a74937934de9e602734c
--- /dev/null
+++ b/ppcls/configs/PULC/car_exists/search.yaml
@@ -0,0 +1,40 @@
+base_config_file: ppcls/configs/PULC/car_exists/PPLCNet_x1_0_search.yaml
+distill_config_file: ppcls/configs/PULC/car_exists/PPLCNet_x1_0_distillation.yaml
+
+gpus: 0,1,2,3
+output_dir: output/search_car_exists
+search_times: 1
+search_dict:
+  - search_key: lrs
+    replace_config:
+      - Optimizer.lr.learning_rate
+    search_values: [0.0075, 0.01, 0.0125]
+  - search_key: resolutions
+    replace_config:
+      - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size
+      - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size
+    search_values: [176, 192, 224]
+  - search_key: ra_probs
+    replace_config:
+      - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob
+    search_values: [0.0, 0.1, 0.5]
+  - search_key: re_probs
+    replace_config:
+      - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON
+    search_values: [0.0, 0.1, 0.5]
+  - search_key: lr_mult_list
+    replace_config:
+      - Arch.lr_mult_list
+    search_values:
+      - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
+      - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0]
+      - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+teacher:
+  rm_keys:
+    - Arch.lr_mult_list
+  search_values:
+    - ResNet101_vd
+    - ResNet50_vd
+final_replace:
+  Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list
+
diff --git a/ppcls/configs/PULC/language_classification/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/language_classification/MobileNetV3_small_x0_35.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c3973ff421325f5b2151dfe3349d062aa2ed90c0
--- /dev/null
+++ b/ppcls/configs/PULC/language_classification/MobileNetV3_small_x0_35.yaml
@@ -0,0 +1,132 @@
+# global configs
+Global:
+  checkpoints: null
+  pretrained_model: null
+  output_dir: ./output/
+  device: gpu
+  save_interval: 1
+  eval_during_train: True
+  eval_interval: 1
+  epochs: 30
+  print_batch_step: 10
+  use_visualdl: False
+  # used for static mode and model export
+  image_shape: [3, 224, 224]
+  save_inference_dir: ./inference
+  start_eval_epoch: 20
+
+# model architecture
+Arch:
+  name: MobileNetV3_small_x0_35
+  class_num: 10
+  pretrained: True
+
+# loss function config for training/eval process
+Loss:
+  Train:
+    - CELoss:
+        weight: 1.0
+        epsilon: 0.1
+  Eval:
+    - CELoss:
+        weight: 1.0
+
+
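+# A sketch of the schedule requested by the Optimizer block below, assuming
+# the usual linear-warmup plus cosine-annealing form (see ppcls/optimizer for
+# the exact implementation):
+#   warmup, first 5 epochs:   lr(t) = learning_rate * t / warmup_steps
+#   cosine, remaining epochs: lr(t) = 0.5 * learning_rate * (1 + cos(pi * t / T_max))
+# The large learning_rate of 1.3 assumes the multi-card global batch size these
+# configs ship with; scale it linearly if the global batch size changes.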
+Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..081d8d23f2be9598adf450cd048a2f6094d4477c --- /dev/null +++ b/ppcls/configs/PULC/language_classification/PPLCNet_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 80, 160] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 10 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + lr_mult_list : [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 
1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 1.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d792c573df5454a753feab7fb4d6b214a894b10f --- /dev/null +++ b/ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 10 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + lr_mult_list : [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + 
interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 1.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49a5f17026e99441db8949b52e3b8d1942bc3139 --- /dev/null +++ b/ppcls/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml @@ -0,0 +1,142 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 48, 192] + save_inference_dir: ./inference + start_eval_epoch: 20 + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 10 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + 
Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 32 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/language_classification/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/language_classification/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e1a45a9e6eb08e39afbd74583b560db801306e7 --- /dev/null +++ b/ppcls/configs/PULC/language_classification/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,160 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 10 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-4 + eta_min: 1e-5 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + cls_label_path: ./dataset/language_classification/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/language_classification/ + 
cls_label_path: ./dataset/language_classification/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/language_classification/word_35404.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/language_classification_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/language_classification/search.yaml b/ppcls/configs/PULC/language_classification/search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4b3dde564b1711e1e2c1d8c22b69d8e264adacd --- /dev/null +++ b/ppcls/configs/PULC/language_classification/search.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/language_classification/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/language_classification/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_language_classification +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.2, 0.4, 0.8] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.ResizeImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + - DataLoader.Eval.dataset.transform_ops.1.ResizeImage.size + search_values: [[192, 48], [180, 60], [160, 80]] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.5, 1.0] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.5, 1.0] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list \ No newline at end of file diff --git a/ppcls/configs/PULC/person_attribute/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/person_attribute/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..94b443832cd2244ac900a85f78f6ab2ac05cb848 --- /dev/null +++ b/ppcls/configs/PULC/person_attribute/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,135 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "MobileNetV3_small_x0_35" + pretrained: True + 
class_num: 26 + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + #clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b042ad757421f99572f6e2df3a7fb3cec4a7a510 --- /dev/null +++ b/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 26 + + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: 
True + channel_first: False + - ResizeImage: + size: [192, 256] + - TimmAutoAugment: + prob: 0.8 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.4 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml b/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd6503488f4730599c98d2f5889b7bf87aa0ba7a --- /dev/null +++ b/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml @@ -0,0 +1,172 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + use_multilabel: True + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 26 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + - DistillationMultiLabelLoss: + weight: 1.0 + weight_ratio: True + model_names: ["Student"] + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + + +# data loader for train and eval 
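+# Note: the Train pipeline below feeds one and the same augmented batch to both
+# Teacher and Student; DistillationDMLLoss above then pulls their softened
+# predictions toward each other (deep mutual learning), while
+# DistillationMultiLabelLoss supervises the Student with the ground-truth
+# attribute labels. This is a sketch of the intent; see ppcls/loss for the
+# exact definitions.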
+DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - TimmAutoAugment: + prob: 0.8 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.4 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: diff --git a/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8f6b0d7fede587c09bd0a01286ec62590854d12b --- /dev/null +++ b/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 26 + + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 256] + - 
Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/person_attribute/Res2Net200_vd_26w_4s.yaml b/ppcls/configs/PULC/person_attribute/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f7dc273c3d057a4505fa01f198b75411838f3e8 --- /dev/null +++ b/ppcls/configs/PULC/person_attribute/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 192] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "Res2Net200_vd_26w_4s" + pretrained: True + class_num: 26 + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - Padv2: + size: [212, 276] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [192, 256] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" 
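+      # label_ratio: True attaches the per-attribute positive-sample ratios of
+      # the label list to each batch; MultiLabelLoss (weight_ratio: True above)
+      # uses them to re-weight the per-attribute binary terms. This reading is
+      # an assumption from the loss config, not spelled out in this diff.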
+ label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 256] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/person_attribute/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/person_attribute/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36c3d6aae19b70a56bf1aebe3989fa83f0fcc715 --- /dev/null +++ b/ppcls/configs/PULC/person_attribute/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,135 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "SwinTransformer_tiny_patch4_window7_224" + pretrained: True + class_num: 26 + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + #clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - Padv2: + size: [244, 244] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [224, 224] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/pa100k/" + cls_label_path: "dataset/pa100k/val_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_attribute/090004.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: 
False + - ResizeImage: + size: [224, 224] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: PersonAttribute + threshold: 0.5 #default threshold + glasses_threshold: 0.3 #threshold only for glasses + hold_threshold: 0.6 #threshold only for hold + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/person_attribute/search.yaml b/ppcls/configs/PULC/person_attribute/search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..78192d1132fb4c2cdf1261c86df020f4389ac77e --- /dev/null +++ b/ppcls/configs/PULC/person_attribute/search.yaml @@ -0,0 +1,41 @@ +base_config_file: ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/person_attribute/PPLCNet_x1_0_Distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_attr +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0001, 0.005, 0.01, 0.02, 0.05] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.ResizeImage.size + - DataLoader.Train.dataset.transform_ops.4.RandomCropImage.size + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size + search_values: [[192, 256]] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.7.RandomErasing.EPSILON + search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/ppcls/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9510ec258a678e513960a02fb83139e9312fca91 --- /dev/null +++ b/ppcls/configs/PULC/person_exists/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 2 + pretrained: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + 
scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93e9841d97209350521d3882b3288add5f748ffe --- /dev/null +++ b/ppcls/configs/PULC/person_exists/PPLCNet_x1_0.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + 
size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.9 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3d3aa325870d645449d465662234e9a6551c01bf --- /dev/null +++ b/ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + 
mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..86c25a05b47399cfe044cab30cea06e94bcb90ec --- /dev/null +++ b/ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml @@ -0,0 +1,151 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 
+ - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..be10d67b78a23948a6c62cf379be16d297647d38 --- /dev/null +++ b/ppcls/configs/PULC/person_exists/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,168 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 5e-5 + eta_min: 1e-6 + warmup_epoch: 5 + warmup_start_lr: 1e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/person_exists/ + cls_label_path: ./dataset/person_exists/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/person_exists/objects365_02035329.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: 
+ size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: nobody + label_1: someone + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TprAtFpr: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/person_exists/search.yaml b/ppcls/configs/PULC/person_exists/search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..820337c027248501a564a74937934de9e602734c --- /dev/null +++ b/ppcls/configs/PULC/person_exists/search.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/person_exists/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/person_exists/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_person_cls +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/ppcls/configs/PULC/safety_helmet/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/safety_helmet/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ef4beb79db475869414ec3c6e7b9ade3e24b50a --- /dev/null +++ b/ppcls/configs/PULC/safety_helmet/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + pretrained: True + class_num: 2 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.08 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + 
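# the label file named by cls_label_path below holds one "<image path> <integer label>" pair per line, with paths relative to image_root +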
image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4c3c8642d9464025dda4d628d328e34d3b8a1613 --- /dev/null +++ b/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + pretrained: True + use_ssld: True + class_num: 2 + use_sync_bn: True + +# loss function config for training/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.025 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 176 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.5 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 176 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.1 + r1: 0.3 + sh: 1.0/3.0 + sl: 0.02 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: 
deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..254db5df466cfb928561d295e401086cd5731f1b --- /dev/null +++ b/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,185 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 1 + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - False + - False + use_sync_bn: True + models: + - Teacher: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + return_stages: True + return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"] + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + return_stages: True + return_patterns: ["blocks3", "blocks4", "blocks5", "blocks6"] + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationGTCELoss: + weight: 1.0 + key: logits + model_names: ["Student", "Teacher"] + - DistillationDMLLoss: + weight: 1.0 + key: logits + model_name_pairs: + - ["Student", "Teacher"] + - DistillationDistanceLoss: + weight: 1.0 + key: "blocks4" + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.015 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0.5 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - 
DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98f63a613278cf7eed879d60a65e942bdfb4c687 --- /dev/null +++ b/ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 40 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference +# model architecture +Arch: + name: PPLCNet_x1_0 + pretrained: True + use_ssld: True + class_num: 2 + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.10 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 192 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + prob: 0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 192 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + 
to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/ppcls/configs/PULC/safety_helmet/Res2Net200_vd_26w_4s.yaml b/ppcls/configs/PULC/safety_helmet/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5b987d510c17df28c100728690f8a5d62293d36c --- /dev/null +++ b/ppcls/configs/PULC/safety_helmet/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,137 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: Res2Net200_vd_26w_4s + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.005 + regularizer: + name: 'L2' + coeff: 0.0001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + batch_transform_ops: + - MixupOperator: + alpha: 0.2 + + sampler: + name: DistributedBatchSampler + batch_size: 32 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Train: + - TopkAcc: + topk: [1] + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] + diff --git a/ppcls/configs/PULC/safety_helmet/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/safety_helmet/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5863ee17e4627cff71444710988e94ae76cd8025 --- /dev/null +++ 
b/ppcls/configs/PULC/safety_helmet/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,159 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + pretrained: True + class_num: 2 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 1e-5 + eta_min: 1e-7 + warmup_epoch: 5 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/safety_helmet/ + cls_label_path: ./dataset/safety_helmet/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/safety_helmet/safety_helmet_test_1.png + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: ThreshOutput + threshold: 0.5 + label_0: wearing_helmet + label_1: unwearing_helmet + +Metric: + Eval: + - TprAtFpr: + max_fpr: 0.0001 + - TopkAcc: + topk: [1] diff --git a/ppcls/configs/PULC/safety_helmet/search.yaml b/ppcls/configs/PULC/safety_helmet/search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8c1c933dfef09f1d3a6189f4701a1e7d0678ab9 --- /dev/null +++ b/ppcls/configs/PULC/safety_helmet/search.yaml @@ -0,0 +1,36 @@ +base_config_file: ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/safety_helmet/PPLCNet_x1_0_distillation.yaml + 
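+# each search_dict entry below tries every value in search_values for the dotted config paths under replace_config; numeric indices such as transform_ops.3 refer to positions in the base config's transform_ops list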
+gpus: 0,1,2,3 +output_dir: output/search_safety_helmet +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + algorithm: "udml" +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/ppcls/configs/PULC/text_image_orientation/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/text_image_orientation/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7eaff97684db9661c62d782e9feb65e7f7ba42f7 --- /dev/null +++ b/ppcls/configs/PULC/text_image_orientation/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + start_eval_epoch: 40 + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 4 + pretrained: True + +# loss function config for training/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 1.3 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 16 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 
224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8ded908ebcd389224721102984c7c63cd22293f --- /dev/null +++ b/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 4 + pretrained: True + use_ssld: True + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..b8fd0b10843ebbda530c198f65060c728a19dba1 --- /dev/null +++ b/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 4 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.4 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list_for_distill.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml new file mode 100644 
index 0000000000000000000000000000000000000000..0ba7881569ae568c96540073ed57e1e9c5f5d6ca --- /dev/null +++ b/ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml @@ -0,0 +1,146 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + start_eval_epoch: 40 + + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 4 + pretrained: True + use_ssld: True + + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/text_image_orientation/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/text_image_orientation/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d123cd4bcbf21c553a6f06ac6d767e9f38b1471 --- /dev/null +++ b/ppcls/configs/PULC/text_image_orientation/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,157 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 60 + print_batch_step: 10 + use_visualdl: False + # used for 
static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 4 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 2.5e-4 + eta_min: 1e-5 + warmup_epoch: 20 + warmup_start_lr: 1e-6 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/text_image_orientation/ + cls_label_path: ./dataset/text_image_orientation/test_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/text_image_orientation/img_rot0_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + interpolation: bicubic + backend: pil + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt + +Metric: + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/text_image_orientation/search.yaml b/ppcls/configs/PULC/text_image_orientation/search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d8e65f5f07922985c2dc2a9756e28fcd7f7a0c16 --- /dev/null +++ b/ppcls/configs/PULC/text_image_orientation/search.yaml @@ -0,0 +1,41 @@ +base_config_file: ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/text_image_orientation/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_text_image_orientation +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.1, 0.2, 0.4, 0.8] + - 
search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.4.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.0, 0.3, 0.5, 0.8, 1.0] + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/ppcls/configs/PULC/textline_orientation/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/textline_orientation/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..040868378eb2cb80e66b72e6a1903c69a0833d7b --- /dev/null +++ b/ppcls/configs/PULC/textline_orientation/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,134 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 18 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: MobileNetV3_small_x0_35 + class_num: 2 + pretrained: True + use_sync_bn: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.13 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00001 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 512 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 
0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ab3657d8a93f7825fa2c79fe341db5dbfdfa123 --- /dev/null +++ b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0.yaml @@ -0,0 +1,143 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 80, 160] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_224x224.yaml b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_224x224.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..17b9cbb158285bf6e451625f088a8f9b69705e6a --- /dev/null +++ b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_224x224.yaml @@ -0,0 +1,132 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.04 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cc57e637153d7420d2f99bdece5bb0c8e5b0079 --- /dev/null +++ b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,162 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 80, 160] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: "DistillationModel" 
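+ # DistillationModel pairs the Teacher/Student submodels listed under "models"; infer_model_name selects which submodel is exported for inference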
+ class_num: &class_num 2 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + use_sync_bn: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.8 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - TimmAutoAugment: + prob: 1.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [160, 80] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9e1863776522ca412168b8f11cef47f41bd3e63 --- /dev/null +++ b/ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml @@ -0,0 +1,144 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + start_eval_epoch: 18 + eval_interval: 1 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 48, 192] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False 
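For readers scanning these configs: the PPLCNet_x1_0_distillation.yaml above couples a frozen ResNet101_vd teacher and a PPLCNet_x1_0 student solely through DistillationDMLLoss. A minimal sketch of the symmetric DML (Deep Mutual Learning) objective in plain Paddle, illustrative only and not the actual ppcls.loss implementation:

```python
import paddle
import paddle.nn.functional as F

def dml_loss(logits_a, logits_b):
    # Deep Mutual Learning: average of the two directed KL divergences
    # between the softened predictions of the paired models.
    log_pa = F.log_softmax(logits_a, axis=-1)
    log_pb = F.log_softmax(logits_b, axis=-1)
    pa, pb = log_pa.exp(), log_pb.exp()
    kl_ab = (pa * (log_pa - log_pb)).sum(axis=-1).mean()
    kl_ba = (pb * (log_pb - log_pa)).sum(axis=-1).mean()
    return (kl_ab + kl_ba) / 2

student, teacher = paddle.randn([8, 2]), paddle.randn([8, 2])
loss = dml_loss(student, teacher)  # scalar tensor
```

Because freeze_params_list marks the Teacher as frozen, gradients from this symmetric loss update only the Student.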
+ +# model architecture +Arch: + name: PPLCNet_x1_0 + class_num: 2 + pretrained: True + use_ssld: True + stride_list: [2, [2, 1], [2, 1], [2, 1], [2, 1]] + + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.5 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 16 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/textline_orientation/ + cls_label_path: ./dataset/textline_orientation/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [192, 48] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 1 + class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 2] + Eval: + - TopkAcc: + topk: [1, 2] diff --git a/ppcls/configs/PULC/textline_orientation/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/textline_orientation/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a466d5e08d78eb8859ae0fa3c46e61f5c94d9509 --- /dev/null +++ b/ppcls/configs/PULC/textline_orientation/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,164 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 10 + epochs: 20 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 2 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + 
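The epsilon: 0.1 in the CELoss entry just above switches on label smoothing. A hand-rolled sketch of the smoothed cross-entropy it stands for (not the actual ppcls CELoss code):

```python
import paddle
import paddle.nn.functional as F

def smoothed_ce(logits, labels, epsilon=0.1):
    # Mix the one-hot target with a uniform distribution:
    # target = (1 - eps) * one_hot + eps / num_classes
    num_classes = logits.shape[-1]
    target = (1.0 - epsilon) * F.one_hot(labels, num_classes) + epsilon / num_classes
    return (-target * F.log_softmax(logits, axis=-1)).sum(axis=-1).mean()

logits = paddle.randn([4, 2])
labels = paddle.to_tensor([0, 1, 1, 0])
loss = smoothed_ce(logits, labels)
```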
+Optimizer:
+  name: AdamW
+  beta1: 0.9
+  beta2: 0.999
+  epsilon: 1e-8
+  weight_decay: 0.05
+  no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm
+  one_dim_param_no_weight_decay: True
+  lr:
+    name: Cosine
+    learning_rate: 1e-4
+    eta_min: 2e-6
+    warmup_epoch: 5
+    warmup_start_lr: 2e-7
+
+
+# data loader for train and eval
+DataLoader:
+  Train:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/textline_orientation/
+      cls_label_path: ./dataset/textline_orientation/train_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - RandCropImage:
+            size: 224
+            interpolation: bicubic
+            backend: pil
+        - TimmAutoAugment:
+            config_str: rand-m9-mstd0.5-inc1
+            interpolation: bicubic
+            img_size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+        - RandomErasing:
+            EPSILON: 0.25
+            sl: 0.02
+            sh: 1.0/3.0
+            r1: 0.3
+            attempt: 10
+            use_log_aspect: True
+            mode: pixel
+      batch_transform_ops:
+        - OpSampler:
+            MixupOperator:
+              alpha: 0.8
+              prob: 0.5
+            CutmixOperator:
+              alpha: 1.0
+              prob: 0.5
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 128
+      drop_last: False
+      shuffle: True
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+  Eval:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/textline_orientation/
+      cls_label_path: ./dataset/textline_orientation/val_list.txt
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            resize_short: 256
+        - CropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 64
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+Infer:
+  infer_imgs: deploy/images/PULC/textline_orientation/textline_orientation_test_0_0.png
+  batch_size: 10
+  transforms:
+    - DecodeImage:
+        to_rgb: True
+        channel_first: False
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 1.0/255.0
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ''
+    - ToCHWImage:
+  PostProcess:
+    name: Topk
+    topk: 1
+    class_id_map_file: ppcls/utils/PULC_label_list/textline_orientation_label_list.txt
+
+Metric:
+  Train:
+    - TopkAcc:
+        topk: [1, 2]
+  Eval:
+    - TopkAcc:
+        topk: [1, 2]
diff --git a/ppcls/configs/PULC/textline_orientation/search.yaml b/ppcls/configs/PULC/textline_orientation/search.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4419949bc6fc8e266d64ae21bb9f1ed7015e65b3
--- /dev/null
+++ b/ppcls/configs/PULC/textline_orientation/search.yaml
@@ -0,0 +1,41 @@
+base_config_file: ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml
+distill_config_file: ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_distillation.yaml
+
+gpus: 0,1,2,3
+output_dir: output/search_text
+search_times: 1
+search_dict:
+  - search_key: lrs
+    replace_config:
+      - Optimizer.lr.learning_rate
+    search_values: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
+  - search_key: resolutions
+    replace_config:
+      - DataLoader.Train.dataset.transform_ops.1.ResizeImage.size
+      - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size
+      - DataLoader.Eval.dataset.transform_ops.1.ResizeImage.size
+    search_values: [[192, 48], [180, 60], [160, 80]]
+  - search_key: ra_probs
+    replace_config:
+      - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob
+    search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
+  - search_key: re_probs
+    replace_config:
+      - DataLoader.Train.dataset.transform_ops.4.RandomErasing.EPSILON
+    search_values: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
+  - search_key: lr_mult_list
+    replace_config:
+      - Arch.lr_mult_list
+    search_values:
+      - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
+      - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0]
+      - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+teacher:
+  rm_keys:
+    - Arch.lr_mult_list
+  search_values:
+    - ResNet101_vd
+    - ResNet50_vd
+final_replace:
+  Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list
+
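Each search.yaml drives a simple grid search: every search_key pairs a list of candidate search_values with the dotted replace_config paths to overwrite in the base config before relaunching training; integer path components index into lists such as transform_ops. A small sketch of applying one such override to a loaded config (set_by_path is a hypothetical helper, not the actual PaddleClas search tool):

```python
import yaml

def set_by_path(cfg, dotted_path, value):
    # Walk "DataLoader.Train.dataset.transform_ops.1.ResizeImage.size"-style
    # paths; digit components index into lists, the rest into dicts.
    *head, last = dotted_path.split(".")
    node = cfg
    for key in head:
        node = node[int(key)] if key.isdigit() else node[key]
    node[int(last) if last.isdigit() else last] = value

with open("ppcls/configs/PULC/textline_orientation/PPLCNet_x1_0_search.yaml") as f:
    base = yaml.safe_load(f)
set_by_path(base, "Optimizer.lr.learning_rate", 0.4)
set_by_path(base, "DataLoader.Train.dataset.transform_ops.1.ResizeImage.size", [160, 80])
```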
diff --git a/ppcls/configs/PULC/traffic_sign/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/traffic_sign/MobileNetV3_small_x0_35.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5ebe7441ed307bc0dad25be396db6fa9d849a55b
--- /dev/null
+++ b/ppcls/configs/PULC/traffic_sign/MobileNetV3_small_x0_35.yaml
@@ -0,0 +1,132 @@
+# global configs
+Global:
+  checkpoints: null
+  pretrained_model: null
+  output_dir: ./output/
+  device: gpu
+  save_interval: 1
+  eval_during_train: True
+  eval_interval: 1
+  epochs: 10
+  print_batch_step: 10
+  use_visualdl: False
+  # used for static mode and model export
+  image_shape: [3, 224, 224]
+  save_inference_dir: ./inference
+
+# model architecture
+Arch:
+  name: MobileNetV3_small_x0_35
+  class_num: 232
+  pretrained: True
+
+# loss function config for train/eval process
+Loss:
+  Train:
+    - CELoss:
+        weight: 1.0
+        epsilon: 0.1
+  Eval:
+    - CELoss:
+        weight: 1.0
+
+
+Optimizer:
+  name: Momentum
+  momentum: 0.9
+  lr:
+    name: Cosine
+    learning_rate: 0.01
+    warmup_epoch: 5
+  regularizer:
+    name: 'L2'
+    coeff: 0.00001
+
+
+# data loader for train and eval
+DataLoader:
+  Train:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/
+      cls_label_path: ./dataset/traffic_sign/label_list_train.txt
+      delimiter: "\t"
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - RandCropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 128
+      drop_last: False
+      shuffle: True
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+  Eval:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/
+      cls_label_path: ./dataset/traffic_sign/label_list_test.txt
+      delimiter: "\t"
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            resize_short: 256
+        - CropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 128
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+Infer:
+  infer_imgs: docs/images/inference_deployment/whl_demo.jpg
+  batch_size: 10
+  transforms:
+    - DecodeImage:
+        to_rgb: True
+        channel_first: False
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 1.0/255.0
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ''
+    - ToCHWImage:
+  PostProcess:
+    name: Topk
+    topk: 5
+    class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt
+
+Metric:
+  Train:
+    - TopkAcc:
+        topk: [1, 5]
+  Eval:
+    - TopkAcc:
+        topk: [1, 5]
+
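Any of the configs in this PR is consumed by the standard PaddleClas entry points. The sketch below mirrors what tools/train.py does under the hood, assuming the release-2.4-era Engine API (the override values are only examples):

```python
# Roughly equivalent to:
#   python tools/train.py -c ppcls/configs/PULC/traffic_sign/MobileNetV3_small_x0_35.yaml \
#       -o Global.epochs=10
from ppcls.utils import config as config_util
from ppcls.engine.engine import Engine

cfg = config_util.get_config(
    "ppcls/configs/PULC/traffic_sign/MobileNetV3_small_x0_35.yaml",
    overrides=["Global.epochs=10"],
    show=False,
)
Engine(cfg, mode="train").train()
```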
diff --git a/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5362d07b7b821f13dad7c1520a978a952d4cbad4
--- /dev/null
+++ b/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0.yaml
@@ -0,0 +1,148 @@
+# global configs
+Global:
+  checkpoints: null
+  pretrained_model: null
+  output_dir: ./output/
+  device: gpu
+  save_interval: 1
+  eval_during_train: True
+  eval_interval: 1
+  start_eval_epoch: 0
+  epochs: 10
+  print_batch_step: 10
+  use_visualdl: False
+  # used for static mode and model export
+  image_shape: [3, 224, 224]
+  save_inference_dir: ./inference
+  # training model under @to_static
+  to_static: False
+  use_dali: False
+
+
+# model architecture
+Arch:
+  name: PPLCNet_x1_0
+  class_num: 232
+  pretrained: True
+  use_ssld: True
+
+# loss function config for train/eval process
+Loss:
+  Train:
+    - CELoss:
+        weight: 1.0
+  Eval:
+    - CELoss:
+        weight: 1.0
+
+
+Optimizer:
+  name: Momentum
+  momentum: 0.9
+  lr:
+    name: Cosine
+    learning_rate: 0.02
+    warmup_epoch: 5
+  regularizer:
+    name: 'L2'
+    coeff: 0.00004
+
+
+# data loader for train and eval
+DataLoader:
+  Train:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/
+      cls_label_path: ./dataset/traffic_sign/label_list_train.txt
+      delimiter: "\t"
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - RandCropImage:
+            size: 224
+        - TimmAutoAugment:
+            prob: 0.5
+            config_str: rand-m9-mstd0.5-inc1
+            interpolation: bicubic
+            img_size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+        - RandomErasing:
+            EPSILON: 0.0
+            sl: 0.02
+            sh: 1.0/3.0
+            r1: 0.3
+            attempt: 10
+            use_log_aspect: True
+            mode: pixel
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 64
+      drop_last: False
+      shuffle: True
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+  Eval:
+    dataset:
+      name: ImageNetDataset
+      image_root: ./dataset/
+      cls_label_path: ./dataset/traffic_sign/label_list_test.txt
+      delimiter: "\t"
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            resize_short: 256
+        - CropImage:
+            size: 224
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 128
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+Infer:
+  infer_imgs: deploy/images/PULC/traffic_sign/99603_17806.jpg
+  batch_size: 10
+  transforms:
+    - DecodeImage:
+        to_rgb: True
+        channel_first: False
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 1.0/255.0
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ''
+    - ToCHWImage:
+  PostProcess:
+    name: Topk
+    topk: 5
+    class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt
+
+Metric:
+  Train:
+    - TopkAcc:
+        topk: [1, 5]
+  Eval:
+    - TopkAcc:
+        topk: [1, 5]
diff --git a/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b00c250e1ec01b13b469dfbf8ed472bd2270af23
--- /dev/null
+++ b/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml
@@ -0,0 +1,172 @@
+# global configs
+Global:
+  checkpoints: null
+  pretrained_model: null
+  output_dir: ./output/
+  device: gpu
+  save_interval: 1
+  eval_during_train: True
+  eval_interval: 1
+  epochs: 10
+  print_batch_step: 10
+  use_visualdl: False
+  # used for static mode and model export
+  image_shape: [3, 224, 224]
+  save_inference_dir: ./inference
+  # training model under @to_static
+  to_static: False
+  use_dali: False
+
+# mixed precision training
+AMP:
+  scale_loss: 128.0
+  use_dynamic_loss_scaling: True
+  # O1: mixed fp16
+ level: O1 + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 232 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + freeze_params_list: + - True + - False + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + pretrained: False + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + + infer_model_name: "Student" + +# loss function config for traing/eval process +Loss: + Train: + - DistillationDMLLoss: + weight: 1.0 + model_name_pairs: + - ["Student", "Teacher"] + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train_for_distillation.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - DistillationTopkAcc: + model_key: "Student" + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..27fbc4b862073723004f6bb6ad679dff8d78214a --- /dev/null +++ b/ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml @@ -0,0 +1,148 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 0 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + + +# model architecture +Arch: + 
name: PPLCNet_x1_0 + class_num: 232 + pretrained: True + # use_ssld: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: deploy/images/PULC/traffic_sign/99603_17806.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/PULC/traffic_sign/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/PULC/traffic_sign/SwinTransformer_tiny_patch4_window7_224.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ae86ae6220b58b9535b60004ce3140ab29380621 --- /dev/null +++ b/ppcls/configs/PULC/traffic_sign/SwinTransformer_tiny_patch4_window7_224.yaml @@ -0,0 +1,170 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + start_eval_epoch: 0 + epochs: 10 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: SwinTransformer_tiny_patch4_window7_224 + class_num: 232 + pretrained: True + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + 
no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm + one_dim_param_no_weight_decay: True + lr: + name: Cosine + learning_rate: 2e-4 + eta_min: 2e-6 + warmup_epoch: 5 + warmup_start_lr: 2e-7 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_train.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + interpolation: bicubic + backend: pil + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.8 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/traffic_sign/label_list_test.txt + delimiter: "\t" + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/PULC_label_list/traffic_sign_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] + + diff --git a/ppcls/configs/PULC/traffic_sign/search.yaml b/ppcls/configs/PULC/traffic_sign/search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..029d042dff669be9a4c3751d65e9da70e8c47a73 --- /dev/null +++ b/ppcls/configs/PULC/traffic_sign/search.yaml @@ -0,0 +1,41 @@ +base_config_file: ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/traffic_sign/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_traffic_sign +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.4.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 
0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + algorithm: "skl-ugi" + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/ppcls/configs/PULC/vehicle_attribute/MobileNetV3_small_x0_35.yaml b/ppcls/configs/PULC/vehicle_attribute/MobileNetV3_small_x0_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a35bc61145a4ed97c1f02ba3b4c587b8686aaa14 --- /dev/null +++ b/ppcls/configs/PULC/vehicle_attribute/MobileNetV3_small_x0_35.yaml @@ -0,0 +1,115 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "MobileNetV3_small_x0_35" + pretrained: True + class_num: 19 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml b/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a3369a9eef9c8bc54ee5b26582f0a6c4ede789fd --- /dev/null +++ b/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml @@ -0,0 +1,149 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + class_num: 19 + use_ssld: True + lr_mult_list: 
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.0125 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: VehicleAttribute + color_threshold: 0.5 + type_threshold: 0.5 + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml b/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d098ca81f303fa19a0ebf71145143c2c982dba39 --- /dev/null +++ b/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml @@ -0,0 +1,171 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "DistillationModel" + class_num: &class_num 19 + # if not null, its lengths should be same as models + pretrained_list: + # if not null, its lengths should be same as models + infer_model_name: "Student" + freeze_params_list: + - True + - False + use_ssld: True + models: + - Teacher: + name: ResNet101_vd + class_num: *class_num + - Student: + name: PPLCNet_x1_0 + class_num: *class_num + pretrained: True + use_ssld: True + +# loss function config for 
traing/eval process +Loss: + Train: + - DistillationMultiLabelLoss: + weight: 1.0 + model_names: ["Student"] + weight_ratio: True + size_sum: True + - DistillationDMLLoss: + weight: 1.0 + weight_ratio: True + sum_across_class_dim: False + model_name_pairs: + - ["Student", "Teacher"] + + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + +Infer: + infer_imgs: ./deploy/images/PULC/vehicle_attribute/0002_c002_00030670_0.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: VehicleAttribute + color_threshold: 0.5 + type_threshold: 0.5 + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml b/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5f84c2a6552c2a688c2311a1af2e695f047d4402 --- /dev/null +++ b/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml @@ -0,0 +1,129 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# model architecture +Arch: + name: "PPLCNet_x1_0" + pretrained: True + use_ssld: True + class_num: 19 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + 
learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - TimmAutoAugment: + prob: 0.0 + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.0 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + num_workers: 8 + use_shared_memory: True + Eval: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/test_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 8 + use_shared_memory: True + + +Metric: + Eval: + - ATTRMetric: + + diff --git a/ppcls/configs/PULC/vehicle_attribute/Res2Net200_vd_26w_4s.yaml b/ppcls/configs/PULC/vehicle_attribute/Res2Net200_vd_26w_4s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c6618f960571a6161ca939210c9b21df6d1d847c --- /dev/null +++ b/ppcls/configs/PULC/vehicle_attribute/Res2Net200_vd_26w_4s.yaml @@ -0,0 +1,122 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/mo" + device: "gpu" + save_interval: 5 + eval_during_train: True + eval_interval: 1 + epochs: 30 + print_batch_step: 20 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 192, 256] + save_inference_dir: "./inference" + use_multilabel: True + +# mixed precision training +AMP: + scale_loss: 128.0 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model architecture +Arch: + name: "Res2Net200_vd_26w_4s" + pretrained: True + class_num: 19 + infer_add_softmax: False + +# loss function config for traing/eval process +Loss: + Train: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + Eval: + - MultiLabelLoss: + weight: 1.0 + weight_ratio: True + size_sum: True + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.01 + warmup_epoch: 5 + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiLabelDataset + image_root: "dataset/VeRi/" + cls_label_path: "dataset/VeRi/train_list.txt" + label_ratio: True + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: [256, 192] + - Padv2: + size: [276, 212] + pad_mode: 1 + fill_value: 0 + - RandomCropImage: + size: [256, 192] + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: True + shuffle: True + loader: + 
+      num_workers: 8
+      use_shared_memory: True
+  Eval:
+    dataset:
+      name: MultiLabelDataset
+      image_root: "dataset/VeRi/"
+      cls_label_path: "dataset/VeRi/test_list.txt"
+      label_ratio: True
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            size: [256, 192]
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 64
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+
+Metric:
+  Eval:
+    - ATTRMetric:
+
+
diff --git a/ppcls/configs/PULC/vehicle_attribute/ResNet50.yaml b/ppcls/configs/PULC/vehicle_attribute/ResNet50.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9218769c634949f9df44580aeb8e65df19805b9d
--- /dev/null
+++ b/ppcls/configs/PULC/vehicle_attribute/ResNet50.yaml
@@ -0,0 +1,116 @@
+# global configs
+Global:
+  checkpoints: null
+  pretrained_model: null
+  output_dir: "./output/"
+  device: "gpu"
+  save_interval: 5
+  eval_during_train: True
+  eval_interval: 1
+  epochs: 30
+  print_batch_step: 20
+  use_visualdl: False
+  # used for static mode and model export
+  image_shape: [3, 192, 256]
+  save_inference_dir: "./inference"
+  use_multilabel: True
+
+# model architecture
+Arch:
+  name: "ResNet50"
+  pretrained: True
+  class_num: 19
+  infer_add_softmax: False
+
+# loss function config for train/eval process
+Loss:
+  Train:
+    - MultiLabelLoss:
+        weight: 1.0
+        weight_ratio: True
+        size_sum: True
+  Eval:
+    - MultiLabelLoss:
+        weight: 1.0
+        weight_ratio: True
+        size_sum: True
+
+
+Optimizer:
+  name: Momentum
+  momentum: 0.9
+  lr:
+    name: Cosine
+    learning_rate: 0.01
+    warmup_epoch: 5
+  regularizer:
+    name: 'L2'
+    coeff: 0.0005
+
+# data loader for train and eval
+DataLoader:
+  Train:
+    dataset:
+      name: MultiLabelDataset
+      image_root: "dataset/VeRi/"
+      cls_label_path: "dataset/VeRi/train_list.txt"
+      label_ratio: True
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            size: [256, 192]
+        - Padv2:
+            size: [276, 212]
+            pad_mode: 1
+            fill_value: 0
+        - RandomCropImage:
+            size: [256, 192]
+        - RandFlipImage:
+            flip_code: 1
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 64
+      drop_last: True
+      shuffle: True
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+  Eval:
+    dataset:
+      name: MultiLabelDataset
+      image_root: "dataset/VeRi/"
+      cls_label_path: "dataset/VeRi/test_list.txt"
+      label_ratio: True
+      transform_ops:
+        - DecodeImage:
+            to_rgb: True
+            channel_first: False
+        - ResizeImage:
+            size: [256, 192]
+        - NormalizeImage:
+            scale: 1.0/255.0
+            mean: [0.485, 0.456, 0.406]
+            std: [0.229, 0.224, 0.225]
+            order: ''
+    sampler:
+      name: DistributedBatchSampler
+      batch_size: 64
+      drop_last: False
+      shuffle: False
+    loader:
+      num_workers: 8
+      use_shared_memory: True
+
+
+Metric:
+  Eval:
+    - ATTRMetric:
+
+
diff --git a/ppcls/configs/PULC/vehicle_attribute/search.yaml b/ppcls/configs/PULC/vehicle_attribute/search.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a16266bf3a3592be8ccb169dee837024d0b1b06
--- /dev/null
+++ b/ppcls/configs/PULC/vehicle_attribute/search.yaml
@@ -0,0 +1,35 @@
+base_config_file: ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_search.yaml
+distill_config_file: ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0_distillation.yaml
+
+gpus: 0,1,2,3
+output_dir: output/search_vehicle_attr
+search_times: 1
+search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.2.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.7.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + algorithm: "skl-ugi" + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list diff --git a/ppcls/configs/Pedestrian/strong_baseline_baseline.yaml b/ppcls/configs/Pedestrian/strong_baseline_baseline.yaml deleted file mode 100644 index 3a3608ed0369d3ad06d194957c8fa81adbb809a8..0000000000000000000000000000000000000000 --- a/ppcls/configs/Pedestrian/strong_baseline_baseline.yaml +++ /dev/null @@ -1,154 +0,0 @@ -# global configs -Global: - checkpoints: null - pretrained_model: null - output_dir: "./output/" - device: "gpu" - save_interval: 40 - eval_during_train: True - eval_interval: 10 - epochs: 120 - print_batch_step: 20 - use_visualdl: False - eval_mode: "retrieval" - retrieval_feature_from: "backbone" # 'backbone' or 'neck' - # used for static mode and model export - image_shape: [3, 256, 128] - save_inference_dir: "./inference" - -# model architecture -Arch: - name: "RecModel" - infer_output_key: "features" - infer_add_softmax: False - Backbone: - name: "ResNet50" - pretrained: True - stem_act: null - BackboneStopLayer: - name: "flatten" - Head: - name: "FC" - embedding_size: 2048 - class_num: 751 - weight_attr: - initializer: - name: KaimingUniform - fan_in: 12288 # 6*embedding_size - bias_attr: - initializer: - name: KaimingUniform - fan_in: 12288 # 6*embedding_size - -# loss function config for traing/eval process -Loss: - Train: - - CELoss: - weight: 1.0 - - TripletLossV2: - weight: 1.0 - margin: 0.3 - normalize_feature: False - feature_from: "backbone" - Eval: - - CELoss: - weight: 1.0 - -Optimizer: - name: Adam - lr: - name: Piecewise - decay_epochs: [40, 70] - values: [0.00035, 0.000035, 0.0000035] - by_epoch: True - last_epoch: 0 - regularizer: - name: 'L2' - coeff: 0.0005 - -# data loader for train and eval -DataLoader: - Train: - dataset: - name: "Market1501" - image_root: "./dataset/" - cls_label_path: "bounding_box_train" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - RandFlipImage: - flip_code: 1 - - Pad: - padding: 10 - - RandCropImageV2: - size: [128, 256] - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - sampler: - name: DistributedRandomIdentitySampler - batch_size: 64 - num_instances: 4 - drop_last: False - shuffle: True - loader: - num_workers: 4 - use_shared_memory: True - Eval: - Query: - dataset: - name: "Market1501" - image_root: "./dataset/" - cls_label_path: "query" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - sampler: - name: DistributedBatchSampler - batch_size: 128 - drop_last: False - shuffle: False - loader: - num_workers: 4 - use_shared_memory: True - - Gallery: - dataset: - name: "Market1501" - image_root: 
"./dataset/" - cls_label_path: "bounding_box_test" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - sampler: - name: DistributedBatchSampler - batch_size: 128 - drop_last: False - shuffle: False - loader: - num_workers: 4 - use_shared_memory: True - -Metric: - Eval: - - Recallk: - topk: [1, 5] - - mAP: {} diff --git a/ppcls/configs/Pedestrian/strong_baseline_m1.yaml b/ppcls/configs/Pedestrian/strong_baseline_m1.yaml deleted file mode 100644 index ef4b605aee5de905494b67beda0bd545a8b12fcb..0000000000000000000000000000000000000000 --- a/ppcls/configs/Pedestrian/strong_baseline_m1.yaml +++ /dev/null @@ -1,172 +0,0 @@ -# global configs -Global: - checkpoints: null - pretrained_model: null - output_dir: "./output/" - device: "gpu" - save_interval: 40 - eval_during_train: True - eval_interval: 10 - epochs: 120 - print_batch_step: 20 - use_visualdl: False - eval_mode: "retrieval" - retrieval_feature_from: "features" # 'backbone' or 'features' - # used for static mode and model export - image_shape: [3, 256, 128] - save_inference_dir: "./inference" - -# model architecture -Arch: - name: "RecModel" - infer_output_key: "features" - infer_add_softmax: False - Backbone: - name: "ResNet50_last_stage_stride1" - pretrained: True - stem_act: null - BackboneStopLayer: - name: "flatten" - Neck: - name: BNNeck - num_features: &feat_dim 2048 - weight_attr: - initializer: - name: Constant - value: 1.0 - bias_attr: - initializer: - name: Constant - value: 0.0 - learning_rate: 1.0e-20 # NOTE: Temporarily set lr small enough to freeze the bias to zero - Head: - name: "FC" - embedding_size: *feat_dim - class_num: 751 - weight_attr: - initializer: - name: Normal - std: 0.001 - bias_attr: False - -# loss function config for traing/eval process -Loss: - Train: - - CELoss: - weight: 1.0 - epsilon: 0.1 - - TripletLossV2: - weight: 1.0 - margin: 0.3 - normalize_feature: False - feature_from: "backbone" - Eval: - - CELoss: - weight: 1.0 - -Optimizer: - name: Adam - lr: - name: Piecewise - decay_epochs: [30, 60] - values: [0.00035, 0.000035, 0.0000035] - warmup_epoch: 10 - warmup_start_lr: 0.0000035 - by_epoch: True - last_epoch: 0 - regularizer: - name: 'L2' - coeff: 0.0005 - -# data loader for train and eval -DataLoader: - Train: - dataset: - name: "Market1501" - image_root: "./dataset/" - cls_label_path: "bounding_box_train" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - RandFlipImage: - flip_code: 1 - - Pad: - padding: 10 - - RandCropImageV2: - size: [128, 256] - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - - RandomErasing: - EPSILON: 0.5 - sl: 0.02 - sh: 0.4 - r1: 0.3 - mean: [0.485, 0.456, 0.406] - sampler: - name: DistributedRandomIdentitySampler - batch_size: 64 - num_instances: 4 - drop_last: False - shuffle: True - loader: - num_workers: 4 - use_shared_memory: True - Eval: - Query: - dataset: - name: "Market1501" - image_root: "./dataset/" - cls_label_path: "query" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - sampler: - name: DistributedBatchSampler - batch_size: 128 - drop_last: False - shuffle: False - loader: - num_workers: 4 - use_shared_memory: True - - Gallery: - dataset: - name: "Market1501" - 
image_root: "./dataset/" - cls_label_path: "bounding_box_test" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - sampler: - name: DistributedBatchSampler - batch_size: 128 - drop_last: False - shuffle: False - loader: - num_workers: 4 - use_shared_memory: True - -Metric: - Eval: - - Recallk: - topk: [1, 5] - - mAP: {} diff --git a/ppcls/configs/Pedestrian/strong_baseline_m1_centerloss.yaml b/ppcls/configs/Pedestrian/strong_baseline_m1_centerloss.yaml deleted file mode 100644 index 6c14bb209875354d9bc0e485aa4aa8b910d116b9..0000000000000000000000000000000000000000 --- a/ppcls/configs/Pedestrian/strong_baseline_m1_centerloss.yaml +++ /dev/null @@ -1,183 +0,0 @@ -# global configs -Global: - checkpoints: null - pretrained_model: null - output_dir: "./output/" - device: "gpu" - save_interval: 40 - eval_during_train: True - eval_interval: 10 - epochs: 120 - print_batch_step: 20 - use_visualdl: False - eval_mode: "retrieval" - retrieval_feature_from: "features" # 'backbone' or 'features' - # used for static mode and model export - image_shape: [3, 256, 128] - save_inference_dir: "./inference" - -# model architecture -Arch: - name: "RecModel" - infer_output_key: "features" - infer_add_softmax: False - Backbone: - name: "ResNet50_last_stage_stride1" - pretrained: True - stem_act: null - BackboneStopLayer: - name: "flatten" - Neck: - name: BNNeck - num_features: &feat_dim 2048 - weight_attr: - initializer: - name: Constant - value: 1.0 - bias_attr: - initializer: - name: Constant - value: 0.0 - learning_rate: 1.0e-20 # NOTE: Temporarily set lr small enough to freeze the bias to zero - Head: - name: "FC" - embedding_size: *feat_dim - class_num: &class_num 751 - weight_attr: - initializer: - name: Normal - std: 0.001 - bias_attr: False - -# loss function config for traing/eval process -Loss: - Train: - - CELoss: - weight: 1.0 - epsilon: 0.1 - - TripletLossV2: - weight: 1.0 - margin: 0.3 - normalize_feature: False - feature_from: "backbone" - - CenterLoss: - weight: 0.0005 - num_classes: *class_num - feat_dim: *feat_dim - feature_from: "backbone" - Eval: - - CELoss: - weight: 1.0 - -Optimizer: - - Adam: - scope: RecModel - lr: - name: Piecewise - decay_epochs: [30, 60] - values: [0.00035, 0.000035, 0.0000035] - warmup_epoch: 10 - warmup_start_lr: 0.0000035 - by_epoch: True - last_epoch: 0 - regularizer: - name: 'L2' - coeff: 0.0005 - - SGD: - scope: CenterLoss - lr: - name: Constant - learning_rate: 1000.0 # NOTE: set to ori_lr*(1/centerloss_weight) to avoid manually scaling centers' gradidents. 
- -# data loader for train and eval -DataLoader: - Train: - dataset: - name: "Market1501" - image_root: "./dataset/" - cls_label_path: "bounding_box_train" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - RandFlipImage: - flip_code: 1 - - Pad: - padding: 10 - - RandCropImageV2: - size: [128, 256] - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - - RandomErasing: - EPSILON: 0.5 - sl: 0.02 - sh: 0.4 - r1: 0.3 - mean: [0.485, 0.456, 0.406] - sampler: - name: DistributedRandomIdentitySampler - batch_size: 64 - num_instances: 4 - drop_last: False - shuffle: True - loader: - num_workers: 4 - use_shared_memory: True - Eval: - Query: - dataset: - name: "Market1501" - image_root: "./dataset/" - cls_label_path: "query" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - sampler: - name: DistributedBatchSampler - batch_size: 128 - drop_last: False - shuffle: False - loader: - num_workers: 4 - use_shared_memory: True - - Gallery: - dataset: - name: "Market1501" - image_root: "./dataset/" - cls_label_path: "bounding_box_test" - backend: "pil" - transform_ops: - - ResizeImage: - size: [128, 256] - return_numpy: False - backend: "pil" - - ToTensor: - - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - sampler: - name: DistributedBatchSampler - batch_size: 128 - drop_last: False - shuffle: False - loader: - num_workers: 4 - use_shared_memory: True - -Metric: - Eval: - - Recallk: - topk: [1, 5] - - mAP: {} diff --git a/ppcls/configs/StrategySearch/person.yaml b/ppcls/configs/StrategySearch/person.yaml new file mode 100644 index 0000000000000000000000000000000000000000..906635595f33417cf564ca54a430c3c648fd738d --- /dev/null +++ b/ppcls/configs/StrategySearch/person.yaml @@ -0,0 +1,40 @@ +base_config_file: ppcls/configs/PULC/person/PPLCNet/PPLCNet_x1_0_search.yaml +distill_config_file: ppcls/configs/PULC/person/Distillation/PPLCNet_x1_0_distillation.yaml + +gpus: 0,1,2,3 +output_dir: output/search_person +search_times: 1 +search_dict: + - search_key: lrs + replace_config: + - Optimizer.lr.learning_rate + search_values: [0.0075, 0.01, 0.0125] + - search_key: resolutions + replace_config: + - DataLoader.Train.dataset.transform_ops.1.RandCropImage.size + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size + search_values: [176, 192, 224] + - search_key: ra_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob + search_values: [0.0, 0.1, 0.5] + - search_key: re_probs + replace_config: + - DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON + search_values: [0.0, 0.1, 0.5] + - search_key: lr_mult_list + replace_config: + - Arch.lr_mult_list + search_values: + - [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] + - [0.0, 0.4, 0.4, 0.8, 0.8, 1.0] + - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] +teacher: + rm_keys: + - Arch.lr_mult_list + search_values: + - ResNet101_vd + - ResNet50_vd +final_replace: + Arch.lr_mult_list: Arch.models.1.Student.lr_mult_list + diff --git a/ppcls/configs/metric_learning/adaface_ir18.yaml b/ppcls/configs/metric_learning/adaface_ir18.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cbfe5da43763701b244b2422bf9ad82b19ef4d6 --- /dev/null +++ b/ppcls/configs/metric_learning/adaface_ir18.yaml @@ -0,0 +1,105 @@ +# global configs +Global: + checkpoints: null + 
pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 26 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 112, 112] + save_inference_dir: "./inference" + eval_mode: "adaface" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "AdaFace_IR_18" + input_size: [112, 112] + Head: + name: "AdaMargin" + embedding_size: 512 + class_num: 70722 + m: 0.4 + s: 64 + h: 0.333 + t_alpha: 0.01 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [12, 20, 24] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "AdaFaceDataset" + root_dir: "dataset/face/" + label_path: "dataset/face/train_filter_label.txt" + transform: + - CropWithPadding: + prob: 0.2 + padding_num: 0 + size: [112, 112] + scale: [0.2, 1.0] + ratio: [0.75, 1.3333333333333333] + - RandomInterpolationAugment: + prob: 0.2 + - ColorJitter: + prob: 0.2 + brightness: 0.5 + contrast: 0.5 + saturation: 0.5 + hue: 0 + - RandomHorizontalFlip: + - ToTensor: + - Normalize: + mean: [0.5, 0.5, 0.5] + std: [0.5, 0.5, 0.5] + sampler: + name: DistributedBatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True + + Eval: + dataset: + name: FiveValidationDataset + val_data_path: dataset/face/faces_emore + concat_mem_file_name: dataset/face/faces_emore/concat_validation_memfile + sampler: + name: BatchSampler + batch_size: 256 + drop_last: False + shuffle: True + loader: + num_workers: 6 + use_shared_memory: True +Metric: + Train: + - TopkAcc: + topk: [1, 5] \ No newline at end of file diff --git a/ppcls/configs/multi_scale/MobileNetV1_multi_scale.yaml b/ppcls/configs/multi_scale/MobileNetV1_multi_scale.yaml new file mode 100644 index 0000000000000000000000000000000000000000..530e7507519ed37dd1126633738c903769fe697e --- /dev/null +++ b/ppcls/configs/multi_scale/MobileNetV1_multi_scale.yaml @@ -0,0 +1,138 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 120 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + +# model architecture +Arch: + name: MobileNetV1 + class_num: 1000 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + Eval: + - CELoss: + weight: 1.0 + + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Piecewise + learning_rate: 0.1 + decay_epochs: [30, 60, 90] + values: [0.1, 0.01, 0.001, 0.0001] + regularizer: + name: 'L2' + coeff: 0.00003 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + + # support to specify 
width and height respectively: + # scales: [(160,160), (192,192), (224,224), (288,288), (320,320)] + sampler: + name: MultiScaleSampler + scales: [160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divided_factor: ensure the width and height dimensions can be divided by the downsampling multiple + first_bs: 64 + divided_factor: 32 + is_training: True + + loader: + num_workers: 4 + use_shared_memory: True + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 64 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/whl/demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 256 + - CropImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/docs/zh_CN/samples/.gitkeep b/ppcls/configs/practical_models/.gitkeep similarity index 100% rename from docs/zh_CN/samples/.gitkeep rename to ppcls/configs/practical_models/.gitkeep diff --git a/ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml b/ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c6415cd47ca04ebb6a89e786b7f63ee85b12ac07 --- /dev/null +++ b/ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml @@ -0,0 +1,150 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 50 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 224, 224] + save_inference_dir: ./inference + # training model under @to_static + to_static: False + use_dali: False + +# model architecture +Arch: + name: PPHGNet_tiny + class_num: 2 + +# loss function config for training/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: Cosine + learning_rate: 0.05 + warmup_epoch: 3 + regularizer: + name: 'L2' + coeff: 0.00004 + + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/phone_train_list_halfbody.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m7-mstd0.5-inc1 + interpolation: bicubic + img_size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 +
CutmixOperator: + alpha: 1.0 + prob: 0.5 + + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: True + loader: + num_workers: 2 + use_shared_memory: False + + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ + cls_label_path: ./dataset/phone_val_list_halfbody.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + interpolation: bicubic + backend: pil + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 2 + use_shared_memory: False + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 1 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + size: 224 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 2 + class_id_map_file: dataset/phone_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 1] + Eval: + - TopkAcc: + topk: [1, 1] diff --git a/ppcls/configs/quick_start/professional/MobileNetV1_multilabel.yaml b/ppcls/configs/quick_start/professional/MobileNetV1_multilabel.yaml index 6838710c326af0d9a549a3340ffd1326d784dced..969d8161f2f3d9bb60f7afe9bd1d33ff155a6d95 100644 --- a/ppcls/configs/quick_start/professional/MobileNetV1_multilabel.yaml +++ b/ppcls/configs/quick_start/professional/MobileNetV1_multilabel.yaml @@ -122,8 +122,8 @@ Infer: Metric: Train: - - HammingDistance: - AccuracyScore: - Eval: - HammingDistance: + Eval: - AccuracyScore: + - HammingDistance: diff --git a/ppcls/configs/reid/strong_baseline/baseline.yaml b/ppcls/configs/reid/strong_baseline/baseline.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5c83b8da850f507964ede94ccefc5203f765bcab --- /dev/null +++ b/ppcls/configs/reid/strong_baseline/baseline.yaml @@ -0,0 +1,158 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 40 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 20 + use_visualdl: False + eval_mode: "retrieval" + retrieval_feature_from: "backbone" # 'backbone' or 'neck' + re_ranking: False + # used for static mode and model export + image_shape: [3, 256, 128] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50" + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + stem_act: null + BackboneStopLayer: + name: "flatten" + Head: + name: "FC" + embedding_size: 2048 + class_num: 751 + weight_attr: + initializer: + name: KaimingUniform + fan_in: 12288 # 6*embedding_size + bias_attr: + initializer: + name: KaimingUniform + fan_in: 12288 # 6*embedding_size + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + - TripletLossV2: + weight: 1.0 + margin: 0.3 + normalize_feature: False + feature_from: "backbone" + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Adam + lr: + name: Piecewise + decay_epochs: [40, 70] + values: [0.00035, 0.000035, 0.0000035] + by_epoch: True + last_epoch: 0 + regularizer: + name: "L2" + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: 
"Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 4 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + Query: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "query" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_test" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/ppcls/configs/reid/strong_baseline/softmax_triplet.yaml b/ppcls/configs/reid/strong_baseline/softmax_triplet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..43f1de62fb6317d69bcd1e6c32a1d5af5e461ef8 --- /dev/null +++ b/ppcls/configs/reid/strong_baseline/softmax_triplet.yaml @@ -0,0 +1,176 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 40 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 20 + use_visualdl: False + eval_mode: "retrieval" + retrieval_feature_from: "features" # 'backbone' or 'features' + re_ranking: False + # used for static mode and model export + image_shape: [3, 256, 128] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + stem_act: null + BackboneStopLayer: + name: "flatten" + Neck: + name: BNNeck + num_features: &feat_dim 2048 + weight_attr: + initializer: + name: Constant + value: 1.0 + bias_attr: + initializer: + name: Constant + value: 0.0 + learning_rate: 1.0e-20 # NOTE: Temporarily set lr small enough to freeze the bias to zero + Head: + name: "FC" + embedding_size: *feat_dim + class_num: 751 + weight_attr: + initializer: + name: Normal + std: 0.001 + bias_attr: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + - TripletLossV2: + weight: 1.0 + margin: 0.3 + normalize_feature: False + feature_from: "backbone" + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: Adam + lr: + name: Piecewise + decay_epochs: [30, 60] + values: [0.00035, 0.000035, 0.0000035] + warmup_epoch: 10 + warmup_start_lr: 0.0000035 + by_epoch: True + 
last_epoch: 0 + regularizer: + name: "L2" + coeff: 0.0005 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0.485, 0.456, 0.406] + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 4 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + Query: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "query" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_test" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml b/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b500fb20390a1bbf9cb3f9cc5b7492fa5dacd7a5 --- /dev/null +++ b/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml @@ -0,0 +1,187 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: "./output/" + device: "gpu" + save_interval: 40 + eval_during_train: True + eval_interval: 10 + epochs: 120 + print_batch_step: 20 + use_visualdl: False + eval_mode: "retrieval" + retrieval_feature_from: "features" # 'backbone' or 'features' + re_ranking: False + # used for static mode and model export + image_shape: [3, 256, 128] + save_inference_dir: "./inference" + +# model architecture +Arch: + name: "RecModel" + infer_output_key: "features" + infer_add_softmax: False + Backbone: + name: "ResNet50_last_stage_stride1" + pretrained: https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/others/resnet50-19c8e357_torch2paddle.pdparams + stem_act: null + BackboneStopLayer: + name: "flatten" + Neck: + name: BNNeck + num_features: &feat_dim 2048 + weight_attr: + initializer: + name: Constant + value: 1.0 + bias_attr: + initializer: + name: Constant + value: 0.0 + learning_rate: 1.0e-20 # NOTE: Temporarily set lr small enough to freeze the bias to zero + Head: + name: "FC" + embedding_size: *feat_dim + class_num: &class_num 751 + weight_attr: + initializer: + name: Normal + std: 0.001 + bias_attr: False + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + - TripletLossV2: + weight: 1.0 + margin: 0.3 
+ normalize_feature: False + feature_from: "backbone" + - CenterLoss: + weight: 0.0005 + num_classes: *class_num + feat_dim: *feat_dim + feature_from: "backbone" + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + - Adam: + scope: RecModel + lr: + name: Piecewise + decay_epochs: [30, 60] + values: [0.00035, 0.000035, 0.0000035] + warmup_epoch: 10 + warmup_start_lr: 0.0000035 + by_epoch: True + last_epoch: 0 + regularizer: + name: "L2" + coeff: 0.0005 + - SGD: + scope: CenterLoss + lr: + name: Constant + learning_rate: 1000.0 # NOTE: set to ori_lr*(1/centerloss_weight) to avoid manually scaling centers' gradidents. + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_train" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - RandFlipImage: + flip_code: 1 + - Pad: + padding: 10 + - RandCropImageV2: + size: [128, 256] + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + - RandomErasing: + EPSILON: 0.5 + sl: 0.02 + sh: 0.4 + r1: 0.3 + mean: [0.485, 0.456, 0.406] + sampler: + name: DistributedRandomIdentitySampler + batch_size: 64 + num_instances: 4 + drop_last: False + shuffle: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + Query: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "query" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + + Gallery: + dataset: + name: "Market1501" + image_root: "./dataset/" + cls_label_path: "bounding_box_test" + backend: "pil" + transform_ops: + - ResizeImage: + size: [128, 256] + return_numpy: False + interpolation: "bilinear" + backend: "pil" + - ToTensor: + - Normalize: + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Metric: + Eval: + - Recallk: + topk: [1, 5] + - mAP: {} diff --git a/ppcls/data/__init__.py b/ppcls/data/__init__.py index 9722bfb85a9a93d007507174ec17b1b95738270c..80cf3bc9af826e935fe0fe6ccf8cad8d6924d370 100644 --- a/ppcls/data/__init__.py +++ b/ppcls/data/__init__.py @@ -28,12 +28,16 @@ from ppcls.data.dataloader.vehicle_dataset import CompCars, VeriWild from ppcls.data.dataloader.logo_dataset import LogoDataset from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset from ppcls.data.dataloader.mix_dataset import MixDataset +from ppcls.data.dataloader.multi_scale_dataset import MultiScaleDataset from ppcls.data.dataloader.person_dataset import Market1501, MSMT17 +from ppcls.data.dataloader.face_dataset import FiveValidationDataset, AdaFaceDataset + # sampler from ppcls.data.dataloader.DistributedRandomIdentitySampler import DistributedRandomIdentitySampler from ppcls.data.dataloader.pk_sampler import PKSampler from ppcls.data.dataloader.mix_sampler import MixSampler +from ppcls.data.dataloader.multi_scale_sampler import MultiScaleSampler from ppcls.data import preprocess from ppcls.data.preprocess import transform @@ -85,7 +89,7 @@ def build_dataloader(config, mode, device, 
use_dali=False, seed=None): # build sampler config_sampler = config[mode]['sampler'] - if "name" not in config_sampler: + if config_sampler and "name" not in config_sampler: batch_sampler = None batch_size = config_sampler["batch_size"] drop_last = config_sampler["drop_last"] diff --git a/ppcls/data/dataloader/__init__.py b/ppcls/data/dataloader/__init__.py index 271a8f5cbfa164dbd6803312cf2d468f8c9bdc82..796f4b458410e5b4b8540b72dd663711c4ad9f46 100644 --- a/ppcls/data/dataloader/__init__.py +++ b/ppcls/data/dataloader/__init__.py @@ -5,6 +5,9 @@ from ppcls.data.dataloader.vehicle_dataset import CompCars, VeriWild from ppcls.data.dataloader.logo_dataset import LogoDataset from ppcls.data.dataloader.icartoon_dataset import ICartoonDataset from ppcls.data.dataloader.mix_dataset import MixDataset +from ppcls.data.dataloader.multi_scale_dataset import MultiScaleDataset from ppcls.data.dataloader.mix_sampler import MixSampler +from ppcls.data.dataloader.multi_scale_sampler import MultiScaleSampler from ppcls.data.dataloader.pk_sampler import PKSampler from ppcls.data.dataloader.person_dataset import Market1501, MSMT17 +from ppcls.data.dataloader.face_dataset import AdaFaceDataset, FiveValidationDataset diff --git a/ppcls/data/dataloader/common_dataset.py b/ppcls/data/dataloader/common_dataset.py index b7b03d8b9e06aa7aa190fb325c2221db3b666c5c..88bab0f1d059a53b5dc062a25e7286637086abb7 100644 --- a/ppcls/data/dataloader/common_dataset.py +++ b/ppcls/data/dataloader/common_dataset.py @@ -44,11 +44,11 @@ def create_operators(params): class CommonDataset(Dataset): - def __init__( - self, - image_root, - cls_label_path, - transform_ops=None, ): + def __init__(self, + image_root, + cls_label_path, + transform_ops=None, + label_ratio=False): self._img_root = image_root self._cls_path = cls_label_path if transform_ops: @@ -56,7 +56,10 @@ class CommonDataset(Dataset): self.images = [] self.labels = [] - self._load_anno() + if label_ratio: + self.label_ratio = self._load_anno(label_ratio=label_ratio) + else: + self._load_anno() def _load_anno(self): pass diff --git a/ppcls/data/dataloader/dali.py b/ppcls/data/dataloader/dali.py index a15c231568a97fd607f2ada4f5f6e81fa084cc62..faef45e26b3dee2e17464a502f42f9886eac6518 100644 --- a/ppcls/data/dataloader/dali.py +++ b/ppcls/data/dataloader/dali.py @@ -23,7 +23,6 @@ import nvidia.dali.types as types import paddle from nvidia.dali import fn from nvidia.dali.pipeline import Pipeline -from nvidia.dali.plugin.base_iterator import LastBatchPolicy from nvidia.dali.plugin.paddle import DALIGenericIterator @@ -230,7 +229,7 @@ def dali_dataloader(config, mode, device, seed=None): lower = ratio[0] upper = ratio[1] - if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env: + if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env and 'FLAGS_selected_gpus' in env: shard_id = int(env['PADDLE_TRAINER_ID']) num_shards = int(env['PADDLE_TRAINERS_NUM']) device_id = int(env['FLAGS_selected_gpus']) @@ -282,7 +281,7 @@ def dali_dataloader(config, mode, device, seed=None): else: resize_shorter = transforms["ResizeImage"].get("resize_short", 256) crop = transforms["CropImage"]["size"] - if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env and sampler_name == "DistributedBatchSampler": + if 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env and 'FLAGS_selected_gpus' in env and sampler_name == "DistributedBatchSampler": shard_id = int(env['PADDLE_TRAINER_ID']) num_shards = int(env['PADDLE_TRAINERS_NUM']) device_id = int(env['FLAGS_selected_gpus']) diff 
--git a/ppcls/data/dataloader/face_dataset.py b/ppcls/data/dataloader/face_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a32cc2c5f89aa8c8e4904e7decc6ec5fb996aab3 --- /dev/null +++ b/ppcls/data/dataloader/face_dataset.py @@ -0,0 +1,163 @@ +import os +import json +import numpy as np +from PIL import Image +import cv2 +import paddle +import paddle.vision.datasets as datasets +from paddle.vision import transforms +from paddle.vision.transforms import functional as F +from paddle.io import Dataset +from .common_dataset import create_operators +from ppcls.data.preprocess import transform as transform_func + +# code is based on AdaFace: https://github.com/mk-minchul/AdaFace + + +class AdaFaceDataset(Dataset): + def __init__(self, root_dir, label_path, transform=None): + self.root_dir = root_dir + self.transform = create_operators(transform) + + with open(label_path) as fd: + lines = fd.readlines() + self.samples = [] + for l in lines: + l = l.strip().split() + self.samples.append([os.path.join(root_dir, l[0]), int(l[1])]) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (sample, target) where target is class_index of the target class. + """ + [path, target] = self.samples[index] + with open(path, 'rb') as f: + img = Image.open(f) + sample = img.convert('RGB') + + # if 'WebFace' in self.root: + # # swap rgb to bgr since image is in rgb for webface + # sample = Image.fromarray(np.asarray(sample)[:, :, ::-1] + if self.transform is not None: + sample = transform_func(sample, self.transform) + return sample, target + + +class FiveValidationDataset(Dataset): + def __init__(self, val_data_path, concat_mem_file_name): + ''' + concatenates all validation datasets from emore + val_data_dict = { + 'agedb_30': (agedb_30, agedb_30_issame), + "cfp_fp": (cfp_fp, cfp_fp_issame), + "lfw": (lfw, lfw_issame), + "cplfw": (cplfw, cplfw_issame), + "calfw": (calfw, calfw_issame), + } + agedb_30: 0 + cfp_fp: 1 + lfw: 2 + cplfw: 3 + calfw: 4 + ''' + val_data = get_val_data(val_data_path) + age_30, cfp_fp, lfw, age_30_issame, cfp_fp_issame, lfw_issame, cplfw, cplfw_issame, calfw, calfw_issame = val_data + val_data_dict = { + 'agedb_30': (age_30, age_30_issame), + "cfp_fp": (cfp_fp, cfp_fp_issame), + "lfw": (lfw, lfw_issame), + "cplfw": (cplfw, cplfw_issame), + "calfw": (calfw, calfw_issame), + } + self.dataname_to_idx = { + "agedb_30": 0, + "cfp_fp": 1, + "lfw": 2, + "cplfw": 3, + "calfw": 4 + } + + self.val_data_dict = val_data_dict + # concat all dataset + all_imgs = [] + all_issame = [] + all_dataname = [] + key_orders = [] + for key, (imgs, issame) in val_data_dict.items(): + all_imgs.append(imgs) + dup_issame = [ + ] # hacky way to make the issame length same as imgs. [1, 1, 0, 0, ...] 
+ for same in issame: + dup_issame.append(same) + dup_issame.append(same) + all_issame.append(dup_issame) + all_dataname.append([self.dataname_to_idx[key]] * len(imgs)) + key_orders.append(key) + assert key_orders == ['agedb_30', 'cfp_fp', 'lfw', 'cplfw', 'calfw'] + + if isinstance(all_imgs[0], np.memmap): + self.all_imgs = read_memmap(concat_mem_file_name) + else: + self.all_imgs = np.concatenate(all_imgs) + + self.all_issame = np.concatenate(all_issame) + self.all_dataname = np.concatenate(all_dataname) + + def __getitem__(self, index): + x_np = self.all_imgs[index].copy() + x = paddle.to_tensor(x_np) + y = self.all_issame[index] + dataname = self.all_dataname[index] + return x, y, dataname, index + + def __len__(self): + return len(self.all_imgs) + + +def read_memmap(mem_file_name): + # r+ mode: Open existing file for reading and writing + with open(mem_file_name + '.conf', 'r') as file: + memmap_configs = json.load(file) + return np.memmap(mem_file_name, mode='r+', \ + shape=tuple(memmap_configs['shape']), \ + dtype=memmap_configs['dtype']) + + +def get_val_pair(path, name, use_memfile=True): + # installing bcolz may require setting a proxy to access the internet + import bcolz + if use_memfile: + mem_file_dir = os.path.join(path, name, 'memfile') + mem_file_name = os.path.join(mem_file_dir, 'mem_file.dat') + if os.path.isdir(mem_file_dir): + print('loading validation data memfile') + np_array = read_memmap(mem_file_name) + else: + os.makedirs(mem_file_dir) + carray = bcolz.carray(rootdir=os.path.join(path, name), mode='r') + np_array = np.array(carray) + # mem_array = make_memmap(mem_file_name, np_array) + # del np_array, mem_array + del np_array + np_array = read_memmap(mem_file_name) + else: + np_array = bcolz.carray(rootdir=os.path.join(path, name), mode='r') + + issame = np.load(os.path.join(path, '{}_list.npy'.format(name))) + return np_array, issame + + +def get_val_data(data_path): + agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30') + cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp') + lfw, lfw_issame = get_val_pair(data_path, 'lfw') + cplfw, cplfw_issame = get_val_pair(data_path, 'cplfw') + calfw, calfw_issame = get_val_pair(data_path, 'calfw') + return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame, cplfw, cplfw_issame, calfw, calfw_issame diff --git a/ppcls/data/dataloader/multi_scale_dataset.py b/ppcls/data/dataloader/multi_scale_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ddddf35ef5feca9817e380025d85a34b3989f12f --- /dev/null +++ b/ppcls/data/dataloader/multi_scale_dataset.py @@ -0,0 +1,107 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
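For context on the new file that follows: MultiScaleDataset is designed to be driven by MultiScaleSampler (added later in this diff), which yields (width, height, index) tuples instead of bare indices, so each __getitem__ call learns its target resolution and patches the crop op accordingly. Below is a minimal wiring sketch of how the two cooperate; build_dataloader performs the equivalent wiring from MobileNetV1_multi_scale.yaml above, and the paths and transform_ops here are illustrative only:

from paddle.io import DataLoader
from ppcls.data.dataloader.multi_scale_dataset import MultiScaleDataset
from ppcls.data.dataloader.multi_scale_sampler import MultiScaleSampler

dataset = MultiScaleDataset(
    image_root="./dataset/ILSVRC2012/",
    cls_label_path="./dataset/ILSVRC2012/train_list.txt",
    transform_ops=[
        {"DecodeImage": {"to_rgb": True, "channel_first": False}},
        {"RandCropImage": {"size": 224}},  # size is rewritten per sampled scale
        {"NormalizeImage": {"scale": 1.0 / 255.0,
                            "mean": [0.485, 0.456, 0.406],
                            "std": [0.229, 0.224, 0.225],
                            "order": ""}},
    ])
# The sampler yields batches of (width, height, index) tuples, so every
# __getitem__ call receives the target resolution alongside the index.
batch_sampler = MultiScaleSampler(
    dataset, scales=[160, 192, 224, 288, 320], first_bs=64,
    divided_factor=32, is_training=True)
loader = DataLoader(dataset, batch_sampler=batch_sampler, num_workers=4)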
+ +from __future__ import print_function + +import numpy as np +import os + +from paddle.io import Dataset +from paddle.vision import transforms +import cv2 +import warnings + +from ppcls.data import preprocess +from ppcls.data.preprocess import transform +from ppcls.data.preprocess.ops.operators import DecodeImage +from ppcls.utils import logger +from ppcls.data.dataloader.common_dataset import create_operators + + +class MultiScaleDataset(Dataset): + def __init__( + self, + image_root, + cls_label_path, + transform_ops=None, ): + self._img_root = image_root + self._cls_path = cls_label_path + self.transform_ops = transform_ops + self.images = [] + self.labels = [] + self._load_anno() + self.has_crop_flag = 1 + + def _load_anno(self, seed=None): + assert os.path.exists(self._cls_path) + assert os.path.exists(self._img_root) + self.images = [] + self.labels = [] + + with open(self._cls_path) as fd: + lines = fd.readlines() + if seed is not None: + np.random.RandomState(seed).shuffle(lines) + for l in lines: + l = l.strip().split(" ") + self.images.append(os.path.join(self._img_root, l[0])) + self.labels.append(np.int64(l[1])) + assert os.path.exists(self.images[-1]) + + def __getitem__(self, properties): + # properties is a tuple containing (width, height, index) + img_width = properties[0] + img_height = properties[1] + index = properties[2] + has_crop = False + if self.transform_ops: + for i in range(len(self.transform_ops)): + op = self.transform_ops[i] + resize_op = ['RandCropImage', 'ResizeImage', 'CropImage'] + for resize in resize_op: + if resize in op: + if self.has_crop_flag: + logger.warning( + "Multi scale dataset will crop image according to the multi scale resolution" + ) + self.transform_ops[i][resize] = { + 'size': (img_width, img_height) + } + has_crop = True + self.has_crop_flag = 0 + if not has_crop: + logger.error("Multi scale dataset requires RandCropImage") + raise RuntimeError("Multi scale dataset requires RandCropImage") + self._transform_ops = create_operators(self.transform_ops) + + try: + with open(self.images[index], 'rb') as f: + img = f.read() + if self._transform_ops: + img = transform(img, self._transform_ops) + img = img.transpose((2, 0, 1)) + return (img, self.labels[index]) + + except Exception as ex: + logger.error("Exception occurred when parsing line: {} with msg: {}". + format(self.images[index], ex)) + rnd_idx = np.random.randint(self.__len__()) + return self.__getitem__(rnd_idx) + + def __len__(self): + return len(self.images) + + @property + def class_num(self): + return len(set(self.labels)) diff --git a/ppcls/data/dataloader/multi_scale_sampler.py b/ppcls/data/dataloader/multi_scale_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..57b42b307dfb223c2ab434a89fc6c56b4e1e4a5c --- /dev/null +++ b/ppcls/data/dataloader/multi_scale_sampler.py @@ -0,0 +1,132 @@ +from paddle.io import Sampler +import paddle.distributed as dist + +import math +import random +import numpy as np + +from ppcls import data + + +class MultiScaleSampler(Sampler): + def __init__(self, + data_source, + scales, + first_bs, + divided_factor=32, + is_training=True, + seed=None): + """ + multi scale sampler + Args: + data_source(dataset) + scales(list): several scales for image resolution + first_bs(int): batch size for the first scale in scales + divided_factor(int): ImageNet models down-sample images by a constant factor; ensure that width and height dimensions are multiples of divided_factor. + is_training(boolean): whether in training mode + """ + # min. and max. spatial dimensions + self.data_source = data_source + self.n_data_samples = len(self.data_source) + + if isinstance(scales[0], tuple): + width_dims = [i[0] for i in scales] + height_dims = [i[1] for i in scales] + elif isinstance(scales[0], int): + width_dims = scales + height_dims = scales + base_im_w = width_dims[0] + base_im_h = height_dims[0] + base_batch_size = first_bs + + # Get the GPU and node related information + num_replicas = dist.get_world_size() + rank = dist.get_rank() + # adjust the total samples to avoid batch dropping + num_samples_per_replica = int( + math.ceil(self.n_data_samples * 1.0 / num_replicas)) + img_indices = [idx for idx in range(self.n_data_samples)] + + self.shuffle = False + if is_training: + # compute the spatial dimensions and corresponding batch size + # ImageNet models down-sample images by a factor of 32. + # Ensure that width and height dimensions are multiples of 32. + width_dims = [ + int((w // divided_factor) * divided_factor) for w in width_dims + ] + height_dims = [ + int((h // divided_factor) * divided_factor) + for h in height_dims + ] + + img_batch_pairs = list() + base_elements = base_im_w * base_im_h * base_batch_size + for (h, w) in zip(height_dims, width_dims): + batch_size = int(max(1, (base_elements / (h * w)))) + img_batch_pairs.append((w, h, batch_size)) + self.img_batch_pairs = img_batch_pairs + self.shuffle = True + else: + self.img_batch_pairs = [(base_im_w, base_im_h, base_batch_size)] + + self.img_indices = img_indices + self.n_samples_per_replica = num_samples_per_replica + self.epoch = 0 + self.rank = rank + self.num_replicas = num_replicas + self.seed = seed + self.batch_list = [] + self.current = 0 + indices_rank_i = self.img_indices[self.rank:len(self.img_indices): + self.num_replicas] + while self.current < self.n_samples_per_replica: + curr_w, curr_h, curr_bsz = random.choice(self.img_batch_pairs) + + end_index = min(self.current + curr_bsz, + self.n_samples_per_replica) + + batch_ids = indices_rank_i[self.current:end_index] + n_batch_samples = len(batch_ids) + if n_batch_samples != curr_bsz: + batch_ids += indices_rank_i[:(curr_bsz - n_batch_samples)] + self.current += curr_bsz + + if len(batch_ids) > 0: + batch = [curr_w, curr_h, len(batch_ids)] + self.batch_list.append(batch) + self.length = len(self.batch_list) + + def __iter__(self): + if self.shuffle: + if self.seed is not None: + random.seed(self.seed) + else: + random.seed(self.epoch) + random.shuffle(self.img_indices) + random.shuffle(self.img_batch_pairs) + indices_rank_i = self.img_indices[self.rank:len(self.img_indices): + self.num_replicas] + else: + indices_rank_i = self.img_indices[self.rank:len(self.img_indices): + self.num_replicas] + + start_index = 0 + for batch_tuple in self.batch_list: + curr_w, curr_h, curr_bsz = batch_tuple + end_index = min(start_index + curr_bsz, self.n_samples_per_replica) + batch_ids = indices_rank_i[start_index:end_index] + n_batch_samples = len(batch_ids) + if n_batch_samples != curr_bsz: + batch_ids += indices_rank_i[:(curr_bsz - n_batch_samples)] + start_index += curr_bsz + + if len(batch_ids) > 0: + batch = [(curr_w, curr_h, b_id) for b_id in batch_ids] + yield batch + + def set_epoch(self, epoch: int): + self.epoch = epoch + + def __len__(self): + return self.length diff --git a/ppcls/data/dataloader/multilabel_dataset.py b/ppcls/data/dataloader/multilabel_dataset.py index 2c1ed770388035d2a9fa5a670948d9e1623a0406..c67a5ae78f2592bc9be91f5c087ffd9023cddd1b 100644 --- a/ppcls/data/dataloader/multilabel_dataset.py
+++ b/ppcls/data/dataloader/multilabel_dataset.py @@ -25,9 +25,10 @@ from .common_dataset import CommonDataset class MultiLabelDataset(CommonDataset): - def _load_anno(self): + def _load_anno(self, label_ratio=False): assert os.path.exists(self._cls_path) assert os.path.exists(self._img_root) + self.label_ratio = label_ratio self.images = [] self.labels = [] with open(self._cls_path) as fd: @@ -41,6 +42,8 @@ class MultiLabelDataset(CommonDataset): self.labels.append(labels) assert os.path.exists(self.images[-1]) + if self.label_ratio is not False: + return np.array(self.labels).mean(0).astype("float32") def __getitem__(self, idx): try: @@ -50,7 +53,10 @@ class MultiLabelDataset(CommonDataset): img = transform(img, self._transform_ops) img = img.transpose((2, 0, 1)) label = np.array(self.labels[idx]).astype("float32") - return (img, label) + if self.label_ratio is not False: + return (img, np.array([label, self.label_ratio])) + else: + return (img, label) except Exception as ex: logger.error("Exception occured when parse line: {} with msg: {}". diff --git a/ppcls/data/postprocess/__init__.py b/ppcls/data/postprocess/__init__.py index 831a4da0008ba70824203be3a6f46c9700225457..6b8b7730bf6ac224cffb9f91ff88f230a14b45bf 100644 --- a/ppcls/data/postprocess/__init__.py +++ b/ppcls/data/postprocess/__init__.py @@ -14,9 +14,11 @@ import copy import importlib -from . import topk +from . import topk, threshoutput from .topk import Topk, MultiLabelTopk +from .threshoutput import ThreshOutput +from .attr_rec import VehicleAttribute, PersonAttribute def build_postprocess(config): diff --git a/ppcls/data/postprocess/attr_rec.py b/ppcls/data/postprocess/attr_rec.py new file mode 100644 index 0000000000000000000000000000000000000000..a8d492501833ac4ccd83d3aea108e7e34c46cadf --- /dev/null +++ b/ppcls/data/postprocess/attr_rec.py @@ -0,0 +1,173 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
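Context for the attribute postprocessors that follow: the MultiLabelDataset change above returns (img, [label, label_ratio]) when label_ratio is enabled, where label_ratio is the per-attribute frequency of positive samples computed in _load_anno. The loss that consumes this pair is not part of this excerpt; the sketch below shows one common way such a ratio is used, the exponentially weighted BCE from the pedestrian-attribute literature, and both the function name and the weighting scheme here are assumptions rather than code from this PR:

import paddle
import paddle.nn.functional as F

def ratio_weighted_bce(logits, target):
    # target stacks [labels, ratios] along axis 1, matching the
    # (img, np.array([label, self.label_ratio])) return value added above
    labels = target[:, 0, :]
    ratios = target[:, 1, :]
    # up-weight positives of rare attributes, down-weight frequent ones
    weights = paddle.where(labels > 0.5,
                           paddle.exp(1.0 - ratios),
                           paddle.exp(ratios))
    loss = F.binary_cross_entropy_with_logits(logits, labels, reduction="none")
    return (weights * loss).mean()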
+ +import os +import numpy as np +import paddle +import paddle.nn.functional as F + + +class VehicleAttribute(object): + def __init__(self, color_threshold=0.5, type_threshold=0.5): + self.color_threshold = color_threshold + self.type_threshold = type_threshold + self.color_list = [ + "yellow", "orange", "green", "gray", "red", "blue", "white", + "golden", "brown", "black" + ] + self.type_list = [ + "sedan", "suv", "van", "hatchback", "mpv", "pickup", "bus", + "truck", "estate" + ] + + def __call__(self, x, file_names=None): + if isinstance(x, dict): + x = x['logits'] + assert isinstance(x, paddle.Tensor) + if file_names is not None: + assert x.shape[0] == len(file_names) + x = F.sigmoid(x).numpy() + + # postprocess output of predictor + batch_res = [] + for idx, res in enumerate(x): + res = res.tolist() + label_res = [] + color_idx = np.argmax(res[:10]) + type_idx = np.argmax(res[10:]) + if res[color_idx] >= self.color_threshold: + color_info = f"Color: ({self.color_list[color_idx]}, prob: {res[color_idx]})" + else: + color_info = "Color unknown" + + if res[type_idx + 10] >= self.type_threshold: + type_info = f"Type: ({self.type_list[type_idx]}, prob: {res[type_idx + 10]})" + else: + type_info = "Type unknown" + + label_res = f"{color_info}, {type_info}" + + threshold_list = [self.color_threshold + ] * 10 + [self.type_threshold] * 9 + pred_res = (np.array(res) > np.array(threshold_list) + ).astype(np.int8).tolist() + batch_res.append({ + "attr": label_res, + "pred": pred_res, + "file_name": file_names[idx] + }) + return batch_res + + + +class PersonAttribute(object): + def __init__(self, + threshold=0.5, + glasses_threshold=0.3, + hold_threshold=0.6): + self.threshold = threshold + self.glasses_threshold = glasses_threshold + self.hold_threshold = hold_threshold + + def __call__(self, x, file_names=None): + if isinstance(x, dict): + x = x['logits'] + assert isinstance(x, paddle.Tensor) + if file_names is not None: + assert x.shape[0] == len(file_names) + x = F.sigmoid(x).numpy() + + # postprocess output of predictor + age_list = ['AgeLess18', 'Age18-60', 'AgeOver60'] + direct_list = ['Front', 'Side', 'Back'] + bag_list = ['HandBag', 'ShoulderBag', 'Backpack'] + upper_list = ['UpperStride', 'UpperLogo', 'UpperPlaid', 'UpperSplice'] + lower_list = [ + 'LowerStripe', 'LowerPattern', 'LongCoat', 'Trousers', 'Shorts', + 'Skirt&Dress' + ] + batch_res = [] + for idx, res in enumerate(x): + res = res.tolist() + label_res = [] + # gender + gender = 'Female' if res[22] > self.threshold else 'Male' + label_res.append(gender) + # age + age = age_list[np.argmax(res[19:22])] + label_res.append(age) + # direction + direction = direct_list[np.argmax(res[23:])] + label_res.append(direction) + # glasses + glasses = 'Glasses: ' + if res[1] > self.glasses_threshold: + glasses += 'True' + else: + glasses += 'False' + label_res.append(glasses) + # hat + hat = 'Hat: ' + if res[0] > self.threshold: + hat += 'True' + else: + hat += 'False' + label_res.append(hat) + # hold obj + hold_obj = 'HoldObjectsInFront: ' + if res[18] > self.hold_threshold: + hold_obj += 'True' + else: + hold_obj += 'False' + label_res.append(hold_obj) + # bag + bag = bag_list[np.argmax(res[15:18])] + bag_score = res[15 + np.argmax(res[15:18])] + bag_label = bag if bag_score > self.threshold else 'No bag' + label_res.append(bag_label) + # upper + upper_res = res[4:8] + upper_label = 'Upper:' + sleeve = 'LongSleeve' if res[3] > res[2] else 'ShortSleeve' + upper_label += ' {}'.format(sleeve) + for i, r in
enumerate(upper_res): + if r > self.threshold: + upper_label += ' {}'.format(upper_list[i]) + label_res.append(upper_label) + # lower + lower_res = res[8:14] + lower_label = 'Lower: ' + has_lower = False + for i, l in enumerate(lower_res): + if l > self.threshold: + lower_label += ' {}'.format(lower_list[i]) + has_lower = True + if not has_lower: + lower_label += ' {}'.format(lower_list[np.argmax(lower_res)]) + + label_res.append(lower_label) + # shoe + shoe = 'Boots' if res[14] > self.threshold else 'No boots' + label_res.append(shoe) + + threshold_list = [0.5] * len(res) + threshold_list[1] = self.glasses_threshold + threshold_list[18] = self.hold_threshold + pred_res = (np.array(res) > np.array(threshold_list) + ).astype(np.int8).tolist() + + batch_res.append({"attributes": label_res, "output": pred_res}) + return batch_res + diff --git a/ppcls/data/postprocess/threshoutput.py b/ppcls/data/postprocess/threshoutput.py new file mode 100644 index 0000000000000000000000000000000000000000..607aecbfdeae018a5334f723effd658fb480713a --- /dev/null +++ b/ppcls/data/postprocess/threshoutput.py @@ -0,0 +1,36 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.nn.functional as F + + +class ThreshOutput(object): + def __init__(self, threshold, label_0="0", label_1="1"): + self.threshold = threshold + self.label_0 = label_0 + self.label_1 = label_1 + + def __call__(self, x, file_names=None): + y = [] + x = F.softmax(x, axis=-1).numpy() + for idx, probs in enumerate(x): + score = probs[1] + if score < self.threshold: + result = {"class_ids": [0], "scores": [1 - score], "label_names": [self.label_0]} + else: + result = {"class_ids": [1], "scores": [score], "label_names": [self.label_1]} + if file_names is not None: + result["file_name"] = file_names[idx] + y.append(result) + return y diff --git a/ppcls/data/postprocess/topk.py b/ppcls/data/postprocess/topk.py index df02719471300ea8e2b7c1db286d104adabe116f..76772f568eef157c4bb5e3485ea9ec5bc41f9d20 100644 --- a/ppcls/data/postprocess/topk.py +++ b/ppcls/data/postprocess/topk.py @@ -21,9 +21,9 @@ import paddle.nn.functional as F class Topk(object): def __init__(self, topk=1, class_id_map_file=None, delimiter=None): assert isinstance(topk, (int, )) - self.class_id_map = self.parse_class_id_map(class_id_map_file) self.topk = topk self.delimiter = delimiter if delimiter is not None else " " + self.class_id_map = self.parse_class_id_map(class_id_map_file) def parse_class_id_map(self, class_id_map_file): if class_id_map_file is None: diff --git a/ppcls/data/preprocess/__init__.py b/ppcls/data/preprocess/__init__.py index 62066016a47c8cef7bd31bc7d238f202ea6455f0..d0cfcf2409d2d890adcf03ef0e03b2475625ead8 100644 --- a/ppcls/data/preprocess/__init__.py +++ b/ppcls/data/preprocess/__init__.py @@ -33,11 +33,18 @@ from ppcls.data.preprocess.ops.operators import AugMix from ppcls.data.preprocess.ops.operators import Pad from ppcls.data.preprocess.ops.operators import ToTensor from 
ppcls.data.preprocess.ops.operators import Normalize +from ppcls.data.preprocess.ops.operators import RandomHorizontalFlip +from ppcls.data.preprocess.ops.operators import CropWithPadding +from ppcls.data.preprocess.ops.operators import RandomInterpolationAugment +from ppcls.data.preprocess.ops.operators import ColorJitter +from ppcls.data.preprocess.ops.operators import RandomCropImage +from ppcls.data.preprocess.ops.operators import Padv2 from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator import numpy as np from PIL import Image +import random def transform(data, ops=[]): @@ -88,16 +95,16 @@ class RandAugment(RawRandAugment): class TimmAutoAugment(RawTimmAutoAugment): """ TimmAutoAugment wrapper to auto fit different img tyeps. """ - def __init__(self, *args, **kwargs): + def __init__(self, prob=1.0, *args, **kwargs): super().__init__(*args, **kwargs) + self.prob = prob def __call__(self, img): if not isinstance(img, Image.Image): img = np.ascontiguousarray(img) img = Image.fromarray(img) - - img = super().__call__(img) - + if random.random() < self.prob: + img = super().__call__(img) if isinstance(img, Image.Image): img = np.asarray(img) diff --git a/ppcls/data/preprocess/ops/operators.py b/ppcls/data/preprocess/ops/operators.py index 157f44f1ab15ffd1162aeada37dba9296ee0ca00..e617b8a71afffeb9e18e4be412f5a3374bd387ec 100644 --- a/ppcls/data/preprocess/ops/operators.py +++ b/ppcls/data/preprocess/ops/operators.py @@ -18,6 +18,7 @@ from __future__ import print_function from __future__ import unicode_literals from functools import partial +import io import six import math import random @@ -25,8 +26,8 @@ import cv2 import numpy as np from PIL import Image, ImageOps, __version__ as PILLOW_VERSION from paddle.vision.transforms import ColorJitter as RawColorJitter -from paddle.vision.transforms import ToTensor, Normalize - +from paddle.vision.transforms import ToTensor, Normalize, RandomHorizontalFlip, RandomResizedCrop +from paddle.vision.transforms import functional as F from .autoaugment import ImageNetPolicy from .functional import augmentations from ppcls.utils import logger @@ -93,6 +94,42 @@ class UnifiedResize(object): return self.resize_func(src, size) +class RandomInterpolationAugment(object): + def __init__(self, prob): + self.prob = prob + + def _aug(self, img): + img_shape = img.shape + side_ratio = np.random.uniform(0.2, 1.0) + small_side = int(side_ratio * img_shape[0]) + interpolation = np.random.choice([ + cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, + cv2.INTER_CUBIC, cv2.INTER_LANCZOS4 + ]) + small_img = cv2.resize( + img, (small_side, small_side), interpolation=interpolation) + interpolation = np.random.choice([ + cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, + cv2.INTER_CUBIC, cv2.INTER_LANCZOS4 + ]) + aug_img = cv2.resize( + small_img, (img_shape[1], img_shape[0]), + interpolation=interpolation) + return aug_img + + def __call__(self, img): + if np.random.random() < self.prob: + if isinstance(img, np.ndarray): + return self._aug(img) + else: + pil_img = np.array(img) + aug_img = self._aug(pil_img) + img = Image.fromarray(aug_img.astype(np.uint8)) + return img + else: + return img + + class OperatorParamError(ValueError): """ OperatorParamError """ @@ -102,28 +139,53 @@ class OperatorParamError(ValueError): class DecodeImage(object): """ decode image """ - def __init__(self, to_rgb=True, to_np=False, channel_first=False): - self.to_rgb = to_rgb + def __init__(self, + to_np=True, + to_rgb=True, + 
channel_first=False, + backend="cv2"): self.to_np = to_np # to numpy + self.to_rgb = to_rgb # only enabled when to_np is True self.channel_first = channel_first # only enabled when to_np is True + if backend.lower() not in ["cv2", "pil"]: + logger.warning( + f"The backend of DecodeImage only supports \"cv2\" or \"PIL\". \"{backend}\" is unavailable. Use \"cv2\" instead." + ) + backend = "cv2" + self.backend = backend.lower() + + if not to_np: + logger.warning( + f"\"to_rgb\" and \"channel_first\" are only enabled when to_np is True. \"to_np\" is now {to_np}." + ) + def __call__(self, img): - if not isinstance(img, np.ndarray): - if six.PY2: - assert type(img) is str and len( - img) > 0, "invalid input 'img' in DecodeImage" + if isinstance(img, Image.Image): + assert self.backend == "pil", "invalid input 'img' in DecodeImage" + elif isinstance(img, np.ndarray): + assert self.backend == "cv2", "invalid input 'img' in DecodeImage" + elif isinstance(img, bytes): + if self.backend == "pil": + data = io.BytesIO(img) + img = Image.open(data) else: - assert type(img) is bytes and len( - img) > 0, "invalid input 'img' in DecodeImage" - data = np.frombuffer(img, dtype='uint8') - img = cv2.imdecode(data, 1) - if self.to_rgb: - assert img.shape[2] == 3, 'invalid shape of image[%s]' % ( - img.shape) - img = img[:, :, ::-1] - - if self.channel_first: - img = img.transpose((2, 0, 1)) + data = np.frombuffer(img, dtype="uint8") + img = cv2.imdecode(data, 1) + else: + raise ValueError("invalid input 'img' in DecodeImage") + + if self.to_np: + if self.backend == "pil": + assert img.mode == "RGB", f"invalid mode of image[{img.mode}]" + img = np.asarray(img)[:, :, ::-1] # BGR + + if self.to_rgb: + assert img.shape[2] == 3, f"invalid shape of image[{img.shape}]" + img = img[:, :, ::-1] + + if self.channel_first: + img = img.transpose((2, 0, 1)) return img @@ -170,6 +232,52 @@ class ResizeImage(object): return self._resize_func(img, (w, h)) +class CropWithPadding(RandomResizedCrop): + """ + crop image and pad to original size + """ + + def __init__(self, + prob=1, + padding_num=0, + size=224, + scale=(0.08, 1.0), + ratio=(3. / 4, 4. / 3), + interpolation='bilinear', + key=None): + super().__init__(size, scale, ratio, interpolation, key) + self.prob = prob + self.padding_num = padding_num + + def __call__(self, img): + is_cv2_img = False + if isinstance(img, np.ndarray): + is_cv2_img = True + if np.random.random() < self.prob: + # RandomResizedCrop augmentation + new = np.zeros_like(np.array(img)) + self.padding_num + # orig_W, orig_H = F._get_image_size(sample) + orig_W, orig_H = self._get_image_size(img) + i, j, h, w = self._get_param(img) + cropped = F.crop(img, i, j, h, w) + new[i:i + h, j:j + w, :] = np.array(cropped) + if not is_cv2_img: + new = Image.fromarray(new.astype(np.uint8)) + return new + else: + return img + + def _get_image_size(self, img): + if F._is_pil_image(img): + return img.size + elif F._is_numpy_image(img): + return img.shape[:2][::-1] + elif F._is_tensor_image(img): + return img.shape[1:][::-1] # chw + else: + raise TypeError("Unexpected type {}".format(type(img))) + + class CropImage(object): """ crop image """ @@ -190,6 +298,102 @@ class CropImage(object): return img[h_start:h_end, w_start:w_end, :] +class Padv2(object): + def __init__(self, + size=None, + size_divisor=32, + pad_mode=0, + offsets=None, + fill_value=(127.5, 127.5, 127.5)): + """ + Pad image to a specified size or multiple of size_divisor.
+ Args: + size (int, list): image target size, if None, pad to multiple of size_divisor, default None + size_divisor (int): size divisor, default 32 + pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets + if 0, only pad to right and bottom. if 1, pad according to center. if 2, only pad left and top + offsets (list): [offset_x, offset_y], specify offset while padding, only supported pad_mode=-1 + fill_value (bool): rgb value of pad area, default (127.5, 127.5, 127.5) + """ + + if not isinstance(size, (int, list)): + raise TypeError( + "Type of target_size is invalid when random_size is True. \ + Must be List, now is {}".format(type(size))) + + if isinstance(size, int): + size = [size, size] + + assert pad_mode in [ + -1, 0, 1, 2 + ], 'currently only supports four modes [-1, 0, 1, 2]' + if pad_mode == -1: + assert offsets, 'if pad_mode is -1, offsets should not be None' + + self.size = size + self.size_divisor = size_divisor + self.pad_mode = pad_mode + self.fill_value = fill_value + self.offsets = offsets + + def apply_image(self, image, offsets, im_size, size): + x, y = offsets + im_h, im_w = im_size + h, w = size + canvas = np.ones((h, w, 3), dtype=np.float32) + canvas *= np.array(self.fill_value, dtype=np.float32) + canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32) + return canvas + + def __call__(self, img): + im_h, im_w = img.shape[:2] + if self.size: + w, h = self.size + assert ( + im_h <= h and im_w <= w + ), '(h, w) of target size should be greater than (im_h, im_w)' + else: + h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor) + w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor) + + if h == im_h and w == im_w: + return img.astype(np.float32) + + if self.pad_mode == -1: + offset_x, offset_y = self.offsets + elif self.pad_mode == 0: + offset_y, offset_x = 0, 0 + elif self.pad_mode == 1: + offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2 + else: + offset_y, offset_x = h - im_h, w - im_w + + offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w] + + return self.apply_image(img, offsets, im_size, size) + + +class RandomCropImage(object): + """Random crop image only + """ + + def __init__(self, size): + super(RandomCropImage, self).__init__() + if isinstance(size, int): + size = [size, size] + self.size = size + + def __call__(self, img): + + h, w = img.shape[:2] + tw, th = self.size + i = random.randint(0, h - th) + j = random.randint(0, w - tw) + + img = img[i:i + th, j:j + tw, :] + return img + + class RandCropImage(object): """ random crop image """ @@ -252,7 +456,7 @@ class RandCropImageV2(object): def __call__(self, img): if isinstance(img, np.ndarray): - img_h, img_w = img.shap[0], img.shap[1] + img_h, img_w = img.shape[0], img.shape[1] else: img_w, img_h = img.size tw, th = self.size @@ -434,16 +638,18 @@ class ColorJitter(RawColorJitter): """ColorJitter. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, prob=2, *args, **kwargs): super().__init__(*args, **kwargs) + self.prob = prob def __call__(self, img): - if not isinstance(img, Image.Image): - img = np.ascontiguousarray(img) - img = Image.fromarray(img) - img = super()._apply_image(img) - if isinstance(img, Image.Image): - img = np.asarray(img) + if np.random.random() < self.prob: + if not isinstance(img, Image.Image): + img = np.ascontiguousarray(img) + img = Image.fromarray(img) + img = super()._apply_image(img) + if isinstance(img, Image.Image): + img = np.asarray(img) return img @@ -463,8 +669,8 @@ class Pad(object): # Process fill color for affine transforms major_found, minor_found = (int(v) for v in PILLOW_VERSION.split('.')[:2]) - major_required, minor_required = ( - int(v) for v in min_pil_version.split('.')[:2]) + major_required, minor_required = (int(v) for v in + min_pil_version.split('.')[:2]) if major_found < major_required or (major_found == major_required and minor_found < minor_required): if fill is None: diff --git a/ppcls/engine/engine.py b/ppcls/engine/engine.py index 5b5c4da8a6500ab90c31f33097075db5f8ee5f89..1aa0a1e05c306f46c77ff09b3fb6af344d3e01e3 100644 --- a/ppcls/engine/engine.py +++ b/ppcls/engine/engine.py @@ -34,6 +34,7 @@ from ppcls.arch import apply_to_static from ppcls.loss import build_loss from ppcls.metric import build_metrics from ppcls.optimizer import build_optimizer +from ppcls.utils.ema import ExponentialMovingAverage from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url from ppcls.utils.save_load import init_model from ppcls.utils import save_load @@ -75,8 +76,9 @@ class Engine(object): print_config(config) # init train_func and eval_func - assert self.eval_mode in ["classification", "retrieval"], logger.error( - "Invalid eval mode: {}".format(self.eval_mode)) + assert self.eval_mode in [ + "classification", "retrieval", "adaface" + ], logger.error("Invalid eval mode: {}".format(self.eval_mode)) self.train_epoch_func = train_epoch self.eval_func = getattr(evaluation, self.eval_mode + "_eval") @@ -98,6 +100,9 @@ class Engine(object): logger.info('train with paddle {} and device {}'.format( paddle.__version__, self.device)) + # gradient accumulation + self.update_freq = self.config["Global"].get("update_freq", 1) + if "class_num" in config["Global"]: global_class_num = config["Global"]["class_num"] if "class_num" not in config["Arch"]: @@ -115,7 +120,7 @@ class Engine(object): self.config["DataLoader"], "Train", self.device, self.use_dali) if self.mode == "eval" or (self.mode == "train" and self.config["Global"]["eval_during_train"]): - if self.eval_mode == "classification": + if self.eval_mode in ["classification", "adaface"]: self.eval_dataloader = build_dataloader( self.config["DataLoader"], "Eval", self.device, self.use_dali) @@ -151,45 +156,39 @@ class Engine(object): self.eval_loss_func = None # build metric - if self.mode == 'train': - metric_config = self.config.get("Metric") - if metric_config is not None: - metric_config = metric_config.get("Train") - if metric_config is not None: - if hasattr( - self.train_dataloader, "collate_fn" - ) and self.train_dataloader.collate_fn is not None: - for m_idx, m in enumerate(metric_config): - if "TopkAcc" in m: - msg = f"'TopkAcc' metric can not be used when setting 'batch_transform_ops' in config. The 'TopkAcc' metric has been removed." 
- logger.warning(msg) - break + if self.mode == 'train' and "Metric" in self.config and "Train" in self.config[ + "Metric"] and self.config["Metric"]["Train"]: + metric_config = self.config["Metric"]["Train"] + if hasattr(self.train_dataloader, "collate_fn" + ) and self.train_dataloader.collate_fn is not None: + for m_idx, m in enumerate(metric_config): + if "TopkAcc" in m: + msg = f"Unable to calculate accuracy when using \"batch_transform_ops\". The metric \"{m}\" has been removed." + logger.warning(msg) metric_config.pop(m_idx) - self.train_metric_func = build_metrics(metric_config) - else: - self.train_metric_func = None + self.train_metric_func = build_metrics(metric_config) else: self.train_metric_func = None if self.mode == "eval" or (self.mode == "train" and self.config["Global"]["eval_during_train"]): - metric_config = self.config.get("Metric") if self.eval_mode == "classification": - if metric_config is not None: - metric_config = metric_config.get("Eval") - if metric_config is not None: - self.eval_metric_func = build_metrics(metric_config) + if "Metric" in self.config and "Eval" in self.config["Metric"]: + self.eval_metric_func = build_metrics(self.config["Metric"] + ["Eval"]) + else: + self.eval_metric_func = None elif self.eval_mode == "retrieval": - if metric_config is None: - metric_config = [{"name": "Recallk", "topk": (1, 5)}] + if "Metric" in self.config and "Eval" in self.config["Metric"]: + metric_config = self.config["Metric"]["Eval"] else: - metric_config = metric_config["Eval"] + metric_config = [{"name": "Recallk", "topk": (1, 5)}] self.eval_metric_func = build_metrics(metric_config) else: self.eval_metric_func = None # build model - self.model = build_model(self.config) + self.model = build_model(self.config, self.mode) # set @to_static for benchmark, skip this by default. apply_to_static(self.config, self.model) @@ -208,7 +207,7 @@ class Engine(object): if self.mode == 'train': self.optimizer, self.lr_sch = build_optimizer( self.config["Optimizer"], self.config["Global"]["epochs"], - len(self.train_dataloader), + len(self.train_dataloader) // self.update_freq, [self.model, self.train_loss_func]) # AMP training and evaluating @@ -221,7 +220,7 @@ class Engine(object): AMP_RELATED_FLAGS_SETTING.update({ 'FLAGS_cudnn_batchnorm_spatial_persistent': 1 }) - paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING) + paddle.set_flags(AMP_RELATED_FLAGS_SETTING) self.scale_loss = self.config["AMP"].get("scale_loss", 1.0) self.use_dynamic_loss_scaling = self.config["AMP"].get( @@ -239,7 +238,7 @@ class Engine(object): self.amp_eval = self.config["AMP"].get("use_fp16_test", False) # TODO(gaotingquan): Paddle not yet support FP32 evaluation when training with AMPO2 - if self.config["Global"].get( + if self.mode == "train" and self.config["Global"].get( "eval_during_train", True) and self.amp_level == "O2" and self.amp_eval == False: msg = "PaddlePaddle only support FP16 evaluation when training with AMP O2 now. 
" @@ -269,10 +268,11 @@ class Engine(object): save_dtype='float32') # paddle version >= 2.3.0 or develop else: - self.model = paddle.amp.decorate( - models=self.model, - level=self.amp_level, - save_dtype='float32') + if self.mode == "train" or self.amp_eval: + self.model = paddle.amp.decorate( + models=self.model, + level=self.amp_level, + save_dtype='float32') if self.mode == "train" and len(self.train_loss_func.parameters( )) > 0: @@ -281,6 +281,12 @@ class Engine(object): level=self.amp_level, save_dtype='float32') + # build EMA model + self.ema = "EMA" in self.config and self.mode == "train" + if self.ema: + self.model_ema = ExponentialMovingAverage( + self.model, self.config['EMA'].get("decay", 0.9999)) + # check the gpu num world_size = dist.get_world_size() self.config["Global"]["distributed"] = world_size != 1 @@ -312,9 +318,13 @@ class Engine(object): print_batch_step = self.config['Global']['print_batch_step'] save_interval = self.config["Global"]["save_interval"] best_metric = { - "metric": 0.0, + "metric": -1.0, "epoch": 0, } + ema_module = None + if self.ema: + best_metric_ema = 0.0 + ema_module = self.model_ema.module # key: # val: metrics list word self.output_info = dict() @@ -329,12 +339,14 @@ class Engine(object): if self.config.Global.checkpoints is not None: metric_info = init_model(self.config.Global, self.model, - self.optimizer, self.train_loss_func) + self.optimizer, self.train_loss_func, + ema_module) if metric_info is not None: best_metric.update(metric_info) self.max_iter = len(self.train_dataloader) - 1 if platform.system( ) == "Windows" else len(self.train_dataloader) + self.max_iter = self.max_iter // self.update_freq * self.update_freq for epoch_id in range(best_metric["epoch"] + 1, self.config["Global"]["epochs"] + 1): @@ -344,18 +356,18 @@ class Engine(object): if self.use_dali: self.train_dataloader.reset() - metric_msg = ", ".join([ - "{}: {:.5f}".format(key, self.output_info[key].avg) - for key in self.output_info - ]) + metric_msg = ", ".join( + [self.output_info[key].avg_info for key in self.output_info]) logger.info("[Train][Epoch {}/{}][Avg]{}".format( epoch_id, self.config["Global"]["epochs"], metric_msg)) self.output_info.clear() # eval model and save model if possible + start_eval_epoch = self.config["Global"].get("start_eval_epoch", + 0) - 1 if self.config["Global"][ "eval_during_train"] and epoch_id % self.config["Global"][ - "eval_interval"] == 0: + "eval_interval"] == 0 and epoch_id > start_eval_epoch: acc = self.eval(epoch_id) if acc > best_metric["metric"]: best_metric["metric"] = acc @@ -365,9 +377,11 @@ class Engine(object): self.optimizer, best_metric, self.output_dir, + ema=ema_module, model_name=self.config["Arch"]["name"], prefix="best_model", - loss=self.train_loss_func) + loss=self.train_loss_func, + save_student_model=True) logger.info("[Eval][Epoch {}][best metric: {}]".format( epoch_id, best_metric["metric"])) logger.scaler( @@ -378,6 +392,32 @@ class Engine(object): self.model.train() + if self.ema: + ori_model, self.model = self.model, ema_module + acc_ema = self.eval(epoch_id) + self.model = ori_model + ema_module.eval() + + if acc_ema > best_metric_ema: + best_metric_ema = acc_ema + save_load.save_model( + self.model, + self.optimizer, + {"metric": acc_ema, + "epoch": epoch_id}, + self.output_dir, + ema=ema_module, + model_name=self.config["Arch"]["name"], + prefix="best_model_ema", + loss=self.train_loss_func) + logger.info("[Eval][Epoch {}][best metric ema: {}]".format( + epoch_id, best_metric_ema)) + logger.scaler( + 
name="eval_acc_ema", + value=acc_ema, + step=epoch_id, + writer=self.vdl_writer) + # save model if epoch_id % save_interval == 0: save_load.save_model( @@ -385,6 +425,7 @@ class Engine(object): self.optimizer, {"metric": acc, "epoch": epoch_id}, self.output_dir, + ema=ema_module, model_name=self.config["Arch"]["name"], prefix="epoch_{}".format(epoch_id), loss=self.train_loss_func) @@ -394,6 +435,7 @@ class Engine(object): self.optimizer, {"metric": acc, "epoch": epoch_id}, self.output_dir, + ema=ema_module, model_name=self.config["Arch"]["name"], prefix="latest", loss=self.train_loss_func) @@ -431,9 +473,21 @@ class Engine(object): image_file_list.append(image_file) if len(batch_data) >= batch_size or idx == len(image_list) - 1: batch_tensor = paddle.to_tensor(batch_data) - out = self.model(batch_tensor) + + if self.amp and self.amp_eval: + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=self.amp_level): + out = self.model(batch_tensor) + else: + out = self.model(batch_tensor) + if isinstance(out, list): out = out[0] + if isinstance(out, dict) and "Student" in out: + out = out["Student"] if isinstance(out, dict) and "logits" in out: out = out["logits"] if isinstance(out, dict) and "output" in out: @@ -445,33 +499,40 @@ class Engine(object): def export(self): assert self.mode == "export" - use_multilabel = self.config["Global"].get("use_multilabel", False) + use_multilabel = self.config["Global"].get( + "use_multilabel", + False) and "ATTRMetric" in self.config["Metric"]["Eval"][0] model = ExportModel(self.config["Arch"], self.model, use_multilabel) if self.config["Global"]["pretrained_model"] is not None: load_dygraph_pretrain(model.base_model, self.config["Global"]["pretrained_model"]) model.eval() + + # for rep nets + for layer in self.model.sublayers(): + if hasattr(layer, "rep") and not getattr(layer, "is_repped"): + layer.rep() + save_path = os.path.join(self.config["Global"]["save_inference_dir"], "inference") - if model.quanter: - model.quanter.save_quantized_model( - model.base_model, - save_path, - input_spec=[ - paddle.static.InputSpec( - shape=[None] + self.config["Global"]["image_shape"], - dtype='float32') - ]) + + model = paddle.jit.to_static( + model, + input_spec=[ + paddle.static.InputSpec( + shape=[None] + self.config["Global"]["image_shape"], + dtype='float32') + ]) + if hasattr(model.base_model, + "quanter") and model.base_model.quanter is not None: + model.base_model.quanter.save_quantized_model(model, + save_path + "_int8") else: - model = paddle.jit.to_static( - model, - input_spec=[ - paddle.static.InputSpec( - shape=[None] + self.config["Global"]["image_shape"], - dtype='float32') - ]) paddle.jit.save(model, save_path) + logger.info( + f"Export succeeded! The inference model exported has been saved in \"{self.config['Global']['save_inference_dir']}\"." 
+ ) class ExportModel(TheseusLayer): diff --git a/ppcls/engine/evaluation/__init__.py b/ppcls/engine/evaluation/__init__.py index e0cd778887bf6f0e7ce05c18b587e5b54bcf6b3f..a301ad7fda34b87a959b59251b6dd0fffe9eb3e9 100644 --- a/ppcls/engine/evaluation/__init__.py +++ b/ppcls/engine/evaluation/__init__.py @@ -14,3 +14,4 @@ from ppcls.engine.evaluation.classification import classification_eval from ppcls.engine.evaluation.retrieval import retrieval_eval +from ppcls.engine.evaluation.adaface import adaface_eval \ No newline at end of file diff --git a/ppcls/engine/evaluation/adaface.py b/ppcls/engine/evaluation/adaface.py new file mode 100644 index 0000000000000000000000000000000000000000..e62144b5cb374a14a93616c33e56ee74bef0eb01 --- /dev/null +++ b/ppcls/engine/evaluation/adaface.py @@ -0,0 +1,260 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import time +import numpy as np +import platform +import paddle +import sklearn +from sklearn.model_selection import KFold +from sklearn.decomposition import PCA + +from ppcls.utils.misc import AverageMeter +from ppcls.utils import logger + + +def fuse_features_with_norm(stacked_embeddings, stacked_norms): + assert stacked_embeddings.ndim == 3 # (n_features_to_fuse, batch_size, channel) + assert stacked_norms.ndim == 3 # (n_features_to_fuse, batch_size, 1) + pre_norm_embeddings = stacked_embeddings * stacked_norms + fused = pre_norm_embeddings.sum(axis=0) + norm = paddle.norm(fused, 2, 1, True) + fused = paddle.divide(fused, norm) + return fused, norm + + +def adaface_eval(engine, epoch_id=0): + output_info = dict() + time_info = { + "batch_cost": AverageMeter( + "batch_cost", '.5f', postfix=" s,"), + "reader_cost": AverageMeter( + "reader_cost", ".5f", postfix=" s,"), + } + print_batch_step = engine.config["Global"]["print_batch_step"] + + metric_key = None + tic = time.time() + unique_dict = {} + for iter_id, batch in enumerate(engine.eval_dataloader): + images, labels, dataname, image_index = batch + if iter_id == 5: + for key in time_info: + time_info[key].reset() + time_info["reader_cost"].update(time.time() - tic) + batch_size = images.shape[0] + batch[0] = paddle.to_tensor(images) + embeddings = engine.model(images, labels)['features'] + norms = paddle.divide(embeddings, paddle.norm(embeddings, 2, 1, True)) + embeddings = paddle.divide(embeddings, norms) + fliped_images = paddle.flip(images, axis=[3]) + flipped_embeddings = engine.model(fliped_images, labels)['features'] + flipped_norms = paddle.divide( + flipped_embeddings, paddle.norm(flipped_embeddings, 2, 1, True)) + flipped_embeddings = paddle.divide(flipped_embeddings, flipped_norms) + stacked_embeddings = paddle.stack( + [embeddings, flipped_embeddings], axis=0) + stacked_norms = paddle.stack([norms, flipped_norms], axis=0) + embeddings, norms = fuse_features_with_norm(stacked_embeddings, + stacked_norms) + + for 
out, nor, label, data, idx in zip(embeddings, norms, labels, + dataname, image_index): + unique_dict[int(idx.numpy())] = { + 'output': out, + 'norm': nor, + 'target': label, + 'dataname': data + } + # calc metric + time_info["batch_cost"].update(time.time() - tic) + if iter_id % print_batch_step == 0: + time_msg = "s, ".join([ + "{}: {:.5f}".format(key, time_info[key].avg) + for key in time_info + ]) + + ips_msg = "ips: {:.5f} images/sec".format( + batch_size / time_info["batch_cost"].avg) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( + epoch_id, iter_id, + len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) + + tic = time.time() + + unique_keys = sorted(unique_dict.keys()) + all_output_tensor = paddle.stack( + [unique_dict[key]['output'] for key in unique_keys], axis=0) + all_norm_tensor = paddle.stack( + [unique_dict[key]['norm'] for key in unique_keys], axis=0) + all_target_tensor = paddle.stack( + [unique_dict[key]['target'] for key in unique_keys], axis=0) + all_dataname_tensor = paddle.stack( + [unique_dict[key]['dataname'] for key in unique_keys], axis=0) + + eval_result = cal_metric(all_output_tensor, all_norm_tensor, + all_target_tensor, all_dataname_tensor) + + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info + ]) + face_msg = ", ".join([ + "{}: {:.5f}".format(key, eval_result[key]) + for key in eval_result.keys() + ]) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg + ", " + + face_msg)) + + # return 1st metric in the dict + return eval_result['all_test_acc'] + + +def cal_metric(all_output_tensor, all_norm_tensor, all_target_tensor, + all_dataname_tensor): + all_target_tensor = all_target_tensor.reshape([-1]) + all_dataname_tensor = all_dataname_tensor.reshape([-1]) + dataname_to_idx = { + "agedb_30": 0, + "cfp_fp": 1, + "lfw": 2, + "cplfw": 3, + "calfw": 4 + } + idx_to_dataname = {val: key for key, val in dataname_to_idx.items()} + test_logs = {} + # _, indices = paddle.unique(all_dataname_tensor, return_index=True, return_inverse=False, return_counts=False) + for dataname_idx in all_dataname_tensor.unique(): + dataname = idx_to_dataname[dataname_idx.item()] + # per dataset evaluation + embeddings = all_output_tensor[all_dataname_tensor == + dataname_idx].numpy() + labels = all_target_tensor[all_dataname_tensor == dataname_idx].numpy() + issame = labels[0::2] + tpr, fpr, accuracy, best_thresholds = evaluate_face( + embeddings, issame, nrof_folds=10) + acc, best_threshold = accuracy.mean(), best_thresholds.mean() + + num_test_samples = len(embeddings) + test_logs[f'{dataname}_test_acc'] = acc + test_logs[f'{dataname}_test_best_threshold'] = best_threshold + test_logs[f'{dataname}_num_test_samples'] = num_test_samples + + test_acc = np.mean([ + test_logs[f'{dataname}_test_acc'] + for dataname in dataname_to_idx.keys() + if f'{dataname}_test_acc' in test_logs + ]) + + test_logs['all_test_acc'] = test_acc + return test_logs + + +def evaluate_face(embeddings, actual_issame, nrof_folds=10, pca=0): + # Calculate evaluation metrics + thresholds = np.arange(0, 4, 0.01) + embeddings1 = embeddings[0::2] + embeddings2 = embeddings[1::2] + tpr, fpr, accuracy, best_thresholds = calculate_roc( + thresholds, + embeddings1, + embeddings2, + np.asarray(actual_issame), + nrof_folds=nrof_folds, + pca=pca) + return tpr, fpr, accuracy, best_thresholds + + +def calculate_roc(thresholds, + embeddings1, 
+ embeddings2, + actual_issame, + nrof_folds=10, + pca=0): + assert (embeddings1.shape[0] == embeddings2.shape[0]) + assert (embeddings1.shape[1] == embeddings2.shape[1]) + nrof_pairs = min(len(actual_issame), embeddings1.shape[0]) + nrof_thresholds = len(thresholds) + k_fold = KFold(n_splits=nrof_folds, shuffle=False) + + tprs = np.zeros((nrof_folds, nrof_thresholds)) + fprs = np.zeros((nrof_folds, nrof_thresholds)) + accuracy = np.zeros((nrof_folds)) + best_thresholds = np.zeros((nrof_folds)) + indices = np.arange(nrof_pairs) + # print('pca', pca) + dist = None + + if pca == 0: + diff = np.subtract(embeddings1, embeddings2) + dist = np.sum(np.square(diff), 1) + + for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): + # print('train_set', train_set) + # print('test_set', test_set) + if pca > 0: + print('doing pca on', fold_idx) + embed1_train = embeddings1[train_set] + embed2_train = embeddings2[train_set] + _embed_train = np.concatenate((embed1_train, embed2_train), axis=0) + # print(_embed_train.shape) + pca_model = PCA(n_components=pca) + pca_model.fit(_embed_train) + embed1 = pca_model.transform(embeddings1) + embed2 = pca_model.transform(embeddings2) + embed1 = sklearn.preprocessing.normalize(embed1) + embed2 = sklearn.preprocessing.normalize(embed2) + # print(embed1.shape, embed2.shape) + diff = np.subtract(embed1, embed2) + dist = np.sum(np.square(diff), 1) + + # Find the best threshold for the fold + acc_train = np.zeros((nrof_thresholds)) + for threshold_idx, threshold in enumerate(thresholds): + _, _, acc_train[threshold_idx] = calculate_accuracy( + threshold, dist[train_set], actual_issame[train_set]) + best_threshold_index = np.argmax(acc_train) + best_thresholds[fold_idx] = thresholds[best_threshold_index] + for threshold_idx, threshold in enumerate(thresholds): + tprs[fold_idx, threshold_idx], fprs[ + fold_idx, threshold_idx], _ = calculate_accuracy( + threshold, dist[test_set], actual_issame[test_set]) + _, _, accuracy[fold_idx] = calculate_accuracy( + thresholds[best_threshold_index], dist[test_set], + actual_issame[test_set]) + + tpr = np.mean(tprs, 0) + fpr = np.mean(fprs, 0) + return tpr, fpr, accuracy, best_thresholds + + +def calculate_accuracy(threshold, dist, actual_issame): + predict_issame = np.less(dist, threshold) + tp = np.sum(np.logical_and(predict_issame, actual_issame)) + fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame))) + tn = np.sum( + np.logical_and( + np.logical_not(predict_issame), np.logical_not(actual_issame))) + fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame)) + + tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn) + fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn) + acc = float(tp + tn) / dist.size + return tpr, fpr, acc diff --git a/ppcls/engine/evaluation/classification.py b/ppcls/engine/evaluation/classification.py index 60595e6a9014b4003ab8008b8144d92d628a2acd..5b305b0a0cb8f3c94561fd338631a2b3a4278687 100644 --- a/ppcls/engine/evaluation/classification.py +++ b/ppcls/engine/evaluation/classification.py @@ -23,6 +23,8 @@ from ppcls.utils import logger def classification_eval(engine, epoch_id=0): + if hasattr(engine.eval_metric_func, "reset"): + engine.eval_metric_func.reset() output_info = dict() time_info = { "batch_cost": AverageMeter( @@ -32,7 +34,6 @@ def classification_eval(engine, epoch_id=0): } print_batch_step = engine.config["Global"]["print_batch_step"] - metric_key = None tic = time.time() accum_samples = 0 total_samples = len( @@ -80,6 +81,7 @@ def 
classification_eval(engine, epoch_id=0): # gather Tensor when distributed if paddle.distributed.get_world_size() > 1: label_list = [] + paddle.distributed.all_gather(label_list, batch[1]) labels = paddle.concat(label_list, 0) @@ -96,7 +98,14 @@ def classification_eval(engine, epoch_id=0): preds = paddle.concat(pred_list, 0) if accum_samples > total_samples and not engine.use_dali: - preds = preds[:total_samples + current_samples - accum_samples] + if isinstance(preds, list): + preds = [ + pred[:total_samples + current_samples - accum_samples] + for pred in preds + ] + else: + preds = preds[:total_samples + current_samples - + accum_samples] labels = labels[:total_samples + current_samples - accum_samples] current_samples = total_samples + current_samples - accum_samples @@ -121,18 +130,10 @@ def classification_eval(engine, epoch_id=0): output_info[key] = AverageMeter(key, '7.5f') output_info[key].update(loss_dict[key].numpy()[0], current_samples) + # calc metric if engine.eval_metric_func is not None: - metric_dict = engine.eval_metric_func(preds, labels) - for key in metric_dict: - if metric_key is None: - metric_key = key - if key not in output_info: - output_info[key] = AverageMeter(key, '7.5f') - - output_info[key].update(metric_dict[key].numpy()[0], - current_samples) - + engine.eval_metric_func(preds, labels) time_info["batch_cost"].update(time.time() - tic) if iter_id % print_batch_step == 0: @@ -144,10 +145,14 @@ def classification_eval(engine, epoch_id=0): ips_msg = "ips: {:.5f} images/sec".format( batch_size / time_info["batch_cost"].avg) - metric_msg = ", ".join([ - "{}: {:.5f}".format(key, output_info[key].val) - for key in output_info - ]) + if "ATTRMetric" in engine.config["Metric"]["Eval"][0]: + metric_msg = "" + else: + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].val) + for key in output_info + ]) + metric_msg += ", {}".format(engine.eval_metric_func.avg_info) logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format( epoch_id, iter_id, len(engine.eval_dataloader), metric_msg, time_msg, ips_msg)) @@ -155,13 +160,29 @@ def classification_eval(engine, epoch_id=0): tic = time.time() if engine.use_dali: engine.eval_dataloader.reset() - metric_msg = ", ".join([ - "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info - ]) - logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) - - # do not try to save best eval.model - if engine.eval_metric_func is None: - return -1 - # return 1st metric in the dict - return output_info[metric_key].avg + + if "ATTRMetric" in engine.config["Metric"]["Eval"][0]: + metric_msg = ", ".join([ + "evalres: ma: {:.5f} label_f1: {:.5f} label_pos_recall: {:.5f} label_neg_recall: {:.5f} instance_f1: {:.5f} instance_acc: {:.5f} instance_prec: {:.5f} instance_recall: {:.5f}". 
+ format(*engine.eval_metric_func.attr_res()) + ]) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return engine.eval_metric_func.attr_res()[0] + else: + metric_msg = ", ".join([ + "{}: {:.5f}".format(key, output_info[key].avg) + for key in output_info + ]) + metric_msg += ", {}".format(engine.eval_metric_func.avg_info) + logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg)) + + # do not try to save best eval.model + if engine.eval_metric_func is None: + return -1 + # return 1st metric in the dict + return engine.eval_metric_func.avg diff --git a/ppcls/engine/evaluation/retrieval.py b/ppcls/engine/evaluation/retrieval.py index 05c5d0c35d0f6fdfcd0a8f1dc1a8a121026ede99..02cae1670bbe1255a84fcf80c3097c5c020c917f 100644 --- a/ppcls/engine/evaluation/retrieval.py +++ b/ppcls/engine/evaluation/retrieval.py @@ -16,6 +16,9 @@ from __future__ import division from __future__ import print_function import platform +from typing import Optional + +import numpy as np import paddle from ppcls.utils import logger @@ -48,34 +51,67 @@ def retrieval_eval(engine, epoch_id=0): if engine.eval_loss_func is None: metric_dict = {metric_key: 0.} else: + reranking_flag = engine.config['Global'].get('re_ranking', False) + logger.info(f"re_ranking={reranking_flag}") metric_dict = dict() - for block_idx, block_fea in enumerate(fea_blocks): - similarity_matrix = paddle.matmul( - block_fea, gallery_feas, transpose_y=True) - if query_query_id is not None: - query_id_block = query_id_blocks[block_idx] - query_id_mask = (query_id_block != gallery_unique_id.t()) - - image_id_block = image_id_blocks[block_idx] - image_id_mask = (image_id_block != gallery_img_id.t()) - - keep_mask = paddle.logical_or(query_id_mask, image_id_mask) - similarity_matrix = similarity_matrix * keep_mask.astype( - "float32") - else: - keep_mask = None - - metric_tmp = engine.eval_metric_func(similarity_matrix, - image_id_blocks[block_idx], - gallery_img_id, keep_mask) + if reranking_flag: + # set the order from small to large + for i in range(len(engine.eval_metric_func.metric_func_list)): + if hasattr(engine.eval_metric_func.metric_func_list[i], 'descending') \ + and engine.eval_metric_func.metric_func_list[i].descending is True: + engine.eval_metric_func.metric_func_list[ + i].descending = False + logger.warning( + f"re_ranking=True,{engine.eval_metric_func.metric_func_list[i].__class__.__name__}.descending has been set to False" + ) + + # compute distance matrix(The smaller the value, the more similar) + distmat = re_ranking( + query_feas, gallery_feas, k1=20, k2=6, lambda_value=0.3) + # compute keep mask + query_id_mask = (query_query_id != gallery_unique_id.t()) + image_id_mask = (query_img_id != gallery_img_id.t()) + keep_mask = paddle.logical_or(query_id_mask, image_id_mask) + + # set inf(1e9) distance to those exist in gallery + distmat = distmat * keep_mask.astype("float32") + inf_mat = (paddle.logical_not(keep_mask).astype("float32")) * 1e20 + distmat = distmat + inf_mat + + # compute metric + metric_tmp = engine.eval_metric_func(distmat, query_img_id, + gallery_img_id, keep_mask) for key in metric_tmp: - if key not in metric_dict: - metric_dict[key] = metric_tmp[key] * block_fea.shape[ - 0] / len(query_feas) + metric_dict[key] = metric_tmp[key] + else: + for block_idx, block_fea in enumerate(fea_blocks): + similarity_matrix = paddle.matmul( + block_fea, gallery_feas, 
transpose_y=True) # [n,m] + if query_query_id is not None: + query_id_block = query_id_blocks[block_idx] + query_id_mask = (query_id_block != gallery_unique_id.t()) + + image_id_block = image_id_blocks[block_idx] + image_id_mask = (image_id_block != gallery_img_id.t()) + + keep_mask = paddle.logical_or(query_id_mask, image_id_mask) + similarity_matrix = similarity_matrix * keep_mask.astype( + "float32") else: - metric_dict[key] += metric_tmp[key] * block_fea.shape[ - 0] / len(query_feas) + keep_mask = None + + metric_tmp = engine.eval_metric_func( + similarity_matrix, image_id_blocks[block_idx], + gallery_img_id, keep_mask) + + for key in metric_tmp: + if key not in metric_dict: + metric_dict[key] = metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) + else: + metric_dict[key] += metric_tmp[key] * block_fea.shape[ + 0] / len(query_feas) metric_info_list = [] for key in metric_dict: @@ -123,7 +159,15 @@ def cal_feature(engine, name='gallery'): if len(batch) == 3: has_unique_id = True batch[2] = batch[2].reshape([-1, 1]).astype("int64") - out = engine.model(batch[0], batch[1]) + if engine.amp and engine.amp_eval: + with paddle.amp.auto_cast( + custom_black_list={ + "flatten_contiguous_range", "greater_than" + }, + level=engine.amp_level): + out = engine.model(batch[0], batch[1]) + else: + out = engine.model(batch[0], batch[1]) if "Student" in out: out = out["Student"] @@ -185,3 +229,109 @@ def cal_feature(engine, name='gallery'): logger.info("Build {} done, all feat shape: {}, begin to eval..".format( name, all_feas.shape)) return all_feas, all_img_id, all_unique_id + + +def re_ranking(query_feas: paddle.Tensor, + gallery_feas: paddle.Tensor, + k1: int=20, + k2: int=6, + lambda_value: int=0.5, + local_distmat: Optional[np.ndarray]=None, + only_local: bool=False) -> paddle.Tensor: + """re-ranking, most computed with numpy + + code heavily based on + https://github.com/michuanhaohao/reid-strong-baseline/blob/3da7e6f03164a92e696cb6da059b1cd771b0346d/utils/reid_metric.py + + Args: + query_feas (paddle.Tensor): query features, [num_query, num_features] + gallery_feas (paddle.Tensor): gallery features, [num_gallery, num_features] + k1 (int, optional): k1. Defaults to 20. + k2 (int, optional): k2. Defaults to 6. + lambda_value (int, optional): lambda. Defaults to 0.5. + local_distmat (Optional[np.ndarray], optional): local_distmat. Defaults to None. + only_local (bool, optional): only_local. Defaults to False. 
+ + Returns: + paddle.Tensor: final_dist matrix after re-ranking, [num_query, num_gallery] + """ + query_num = query_feas.shape[0] + all_num = query_num + gallery_feas.shape[0] + if only_local: + original_dist = local_distmat + else: + feat = paddle.concat([query_feas, gallery_feas]) + logger.info('using GPU to compute original distance') + + # L2 distance + distmat = paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([all_num, all_num]) + \ + paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([all_num, all_num]).t() + distmat = distmat.addmm(x=feat, y=feat.t(), alpha=-2.0, beta=1.0) + + original_dist = distmat.cpu().numpy() + del feat + if local_distmat is not None: + original_dist = original_dist + local_distmat + + gallery_num = original_dist.shape[0] + original_dist = np.transpose(original_dist / np.max(original_dist, axis=0)) + V = np.zeros_like(original_dist).astype(np.float16) + initial_rank = np.argsort(original_dist).astype(np.int32) + logger.info('starting re_ranking') + for i in range(all_num): + # k-reciprocal neighbors + forward_k_neigh_index = initial_rank[i, :k1 + 1] + backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1] + fi = np.where(backward_k_neigh_index == i)[0] + k_reciprocal_index = forward_k_neigh_index[fi] + k_reciprocal_expansion_index = k_reciprocal_index + for j in range(len(k_reciprocal_index)): + candidate = k_reciprocal_index[j] + candidate_forward_k_neigh_index = initial_rank[candidate, :int( + np.around(k1 / 2)) + 1] + candidate_backward_k_neigh_index = initial_rank[ + candidate_forward_k_neigh_index, :int(np.around(k1 / 2)) + 1] + fi_candidate = np.where( + candidate_backward_k_neigh_index == candidate)[0] + candidate_k_reciprocal_index = candidate_forward_k_neigh_index[ + fi_candidate] + if len( + np.intersect1d(candidate_k_reciprocal_index, + k_reciprocal_index)) > 2 / 3 * len( + candidate_k_reciprocal_index): + k_reciprocal_expansion_index = np.append( + k_reciprocal_expansion_index, candidate_k_reciprocal_index) + + k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index) + weight = np.exp(-original_dist[i, k_reciprocal_expansion_index]) + V[i, k_reciprocal_expansion_index] = weight / np.sum(weight) + original_dist = original_dist[:query_num, ] + if k2 != 1: + V_qe = np.zeros_like(V, dtype=np.float16) + for i in range(all_num): + V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0) + V = V_qe + del V_qe + del initial_rank + invIndex = [] + for i in range(gallery_num): + invIndex.append(np.where(V[:, i] != 0)[0]) + + jaccard_dist = np.zeros_like(original_dist, dtype=np.float16) + for i in range(query_num): + temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16) + indNonZero = np.where(V[i, :] != 0)[0] + indImages = [invIndex[ind] for ind in indNonZero] + for j in range(len(indNonZero)): + temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum( + V[i, indNonZero[j]], V[indImages[j], indNonZero[j]]) + jaccard_dist[i] = 1 - temp_min / (2 - temp_min) + + final_dist = jaccard_dist * (1 - lambda_value + ) + original_dist * lambda_value + del original_dist + del V + del jaccard_dist + final_dist = final_dist[:query_num, query_num:] + final_dist = paddle.to_tensor(final_dist) + return final_dist diff --git a/ppcls/engine/train/train.py b/ppcls/engine/train/train.py index 14db79e73e9e51d16d5784b7aa48a6afb12a7e0f..a41674da70c167959c2515ec696ca2a6686cf0f8 100644 --- a/ppcls/engine/train/train.py +++ b/ppcls/engine/train/train.py @@ -53,25 +53,33 @@ def train_epoch(engine, epoch_id, print_batch_step): out = 
forward(engine, batch) loss_dict = engine.train_loss_func(out, batch[1]) + # loss + loss = loss_dict["loss"] / engine.update_freq + # backward & step opt if engine.amp: - scaled = engine.scaler.scale(loss_dict["loss"]) + scaled = engine.scaler.scale(loss) scaled.backward() - for i in range(len(engine.optimizer)): - engine.scaler.minimize(engine.optimizer[i], scaled) + if (iter_id + 1) % engine.update_freq == 0: + for i in range(len(engine.optimizer)): + engine.scaler.minimize(engine.optimizer[i], scaled) else: - loss_dict["loss"].backward() - for i in range(len(engine.optimizer)): - engine.optimizer[i].step() + loss.backward() + if (iter_id + 1) % engine.update_freq == 0: + for i in range(len(engine.optimizer)): + engine.optimizer[i].step() - # clear grad - for i in range(len(engine.optimizer)): - engine.optimizer[i].clear_grad() - - # step lr(by step) - for i in range(len(engine.lr_sch)): - if not getattr(engine.lr_sch[i], "by_epoch", False): - engine.lr_sch[i].step() + if (iter_id + 1) % engine.update_freq == 0: + # clear grad + for i in range(len(engine.optimizer)): + engine.optimizer[i].clear_grad() + # step lr(by step) + for i in range(len(engine.lr_sch)): + if not getattr(engine.lr_sch[i], "by_epoch", False): + engine.lr_sch[i].step() + # update ema + if engine.ema: + engine.model_ema.update(engine.model) # below code just for logging # update metric_for_logger diff --git a/ppcls/engine/train/utils.py b/ppcls/engine/train/utils.py index ca211ff932f19ca63804a5a1ff52def5eb89477f..44e54660b6453b713b2325e26b1bd5590b23c933 100644 --- a/ppcls/engine/train/utils.py +++ b/ppcls/engine/train/utils.py @@ -54,12 +54,12 @@ def log_info(trainer, batch_size, epoch_id, iter_id): ips_msg = "ips: {:.5f} samples/s".format( batch_size / trainer.time_info["batch_cost"].avg) eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1 - ) * len(trainer.train_dataloader) - iter_id + ) * trainer.max_iter - iter_id ) * trainer.time_info["batch_cost"].avg eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec)))) logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format( epoch_id, trainer.config["Global"]["epochs"], iter_id, - len(trainer.train_dataloader), lr_msg, metric_msg, time_msg, ips_msg, + trainer.max_iter, lr_msg, metric_msg, time_msg, ips_msg, eta_msg)) for i, lr in enumerate(trainer.lr_sch): diff --git a/ppcls/loss/__init__.py b/ppcls/loss/__init__.py index c1f2f95df7afd0a266304ea2ccdf5572d1de9625..5a62e0156400e91be84dd6fac8690461dde5a8d9 100644 --- a/ppcls/loss/__init__.py +++ b/ppcls/loss/__init__.py @@ -24,6 +24,9 @@ from .distillationloss import DistillationDistanceLoss from .distillationloss import DistillationRKDLoss from .distillationloss import DistillationKLDivLoss from .distillationloss import DistillationDKDLoss +from .distillationloss import DistillationMultiLabelLoss +from .distillationloss import DistillationDISTLoss + from .multilabelloss import MultiLabelLoss from .afdloss import AFDLoss diff --git a/ppcls/loss/afdloss.py b/ppcls/loss/afdloss.py index 3e67e30b98df61576e40449015cc67a13dd6da60..e2f457451292f8b29f614d176288620d5c73006b 100644 --- a/ppcls/loss/afdloss.py +++ b/ppcls/loss/afdloss.py @@ -97,8 +97,6 @@ class Attention(nn.Layer): super().__init__() self.qk_dim = qk_dim self.n_t = n_t - # self.linear_trans_s = LinearTransformStudent(qk_dim, t_shapes, s_shapes, unique_t_shapes) - # self.linear_trans_t = LinearTransformTeacher(qk_dim, t_shapes) self.p_t = self.create_parameter( shape=[len(t_shapes), qk_dim], diff --git 
a/ppcls/loss/dist_loss.py b/ppcls/loss/dist_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..78c8e12ff4cf90a363d0829815ca941ae91dabbf --- /dev/null +++ b/ppcls/loss/dist_loss.py @@ -0,0 +1,52 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + + +def cosine_similarity(a, b, eps=1e-8): + return (a * b).sum(1) / (a.norm(axis=1) * b.norm(axis=1) + eps) + + +def pearson_correlation(a, b, eps=1e-8): + return cosine_similarity(a - a.mean(1).unsqueeze(1), + b - b.mean(1).unsqueeze(1), eps) + + +def inter_class_relation(y_s, y_t): + return 1 - pearson_correlation(y_s, y_t).mean() + + +def intra_class_relation(y_s, y_t): + return inter_class_relation(y_s.transpose([1, 0]), y_t.transpose([1, 0])) + + +class DISTLoss(nn.Layer): + # DISTLoss + # paper [Knowledge Distillation from A Stronger Teacher](https://arxiv.org/pdf/2205.10536v1.pdf) + # code reference: https://github.com/hunto/image_classification_sota/blob/d4f15a0494/lib/models/losses/dist_kd.py + def __init__(self, beta=1.0, gamma=1.0): + super().__init__() + self.beta = beta + self.gamma = gamma + + def forward(self, z_s, z_t): + y_s = F.softmax(z_s, axis=-1) + y_t = F.softmax(z_t, axis=-1) + inter_loss = inter_class_relation(y_s, y_t) + intra_loss = intra_class_relation(y_s, y_t) + kd_loss = self.beta * inter_loss + self.gamma * intra_loss + return kd_loss diff --git a/ppcls/loss/distillationloss.py b/ppcls/loss/distillationloss.py index c60a540db84edae1374e5370309256f1c98cd40a..8537fc548435f617b52203379982aa079335fbfb 100644 --- a/ppcls/loss/distillationloss.py +++ b/ppcls/loss/distillationloss.py @@ -22,6 +22,8 @@ from .distanceloss import DistanceLoss from .rkdloss import RKdAngle, RkdDistance from .kldivloss import KLDivLoss from .dkdloss import DKDLoss +from .dist_loss import DISTLoss +from .multilabelloss import MultiLabelLoss class DistillationCELoss(CELoss): @@ -89,13 +91,16 @@ class DistillationDMLLoss(DMLLoss): def __init__(self, model_name_pairs=[], act="softmax", + weight_ratio=False, + sum_across_class_dim=False, key=None, name="loss_dml"): - super().__init__(act=act) + super().__init__(act=act, sum_across_class_dim=sum_across_class_dim) assert isinstance(model_name_pairs, list) self.key = key self.model_name_pairs = model_name_pairs self.name = name + self.weight_ratio = weight_ratio def forward(self, predicts, batch): loss_dict = dict() @@ -105,7 +110,10 @@ class DistillationDMLLoss(DMLLoss): if self.key is not None: out1 = out1[self.key] out2 = out2[self.key] - loss = super().forward(out1, out2) + if self.weight_ratio is True: + loss = super().forward(out1, out2, batch) + else: + loss = super().forward(out1, out2) if isinstance(loss, dict): for key in loss: loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1], @@ -122,6 +130,7 @@ class DistillationDistanceLoss(DistanceLoss): def __init__(self, mode="l2", model_name_pairs=[], + act=None, key=None, name="loss_", 
**kargs): @@ -130,6 +139,13 @@ class DistillationDistanceLoss(DistanceLoss): self.key = key self.model_name_pairs = model_name_pairs self.name = name + mode + assert act in [None, "sigmoid", "softmax"] + if act == "sigmoid": + self.act = nn.Sigmoid() + elif act == "softmax": + self.act = nn.Softmax(axis=-1) + else: + self.act = None def forward(self, predicts, batch): loss_dict = dict() @@ -139,6 +155,9 @@ class DistillationDistanceLoss(DistanceLoss): if self.key is not None: out1 = out1[self.key] out2 = out2[self.key] + if self.act is not None: + out1 = self.act(out1) + out2 = self.act(out2) loss = super().forward(out1, out2) for key in loss: loss_dict["{}_{}_{}".format(self.name, key, idx)] = loss[key] @@ -218,8 +237,13 @@ class DistillationDKDLoss(DKDLoss): temperature=1.0, alpha=1.0, beta=1.0, + use_target_as_gt=False, name="loss_dkd"): - super().__init__(temperature=temperature, alpha=alpha, beta=beta) + super().__init__( + temperature=temperature, + alpha=alpha, + beta=beta, + use_target_as_gt=use_target_as_gt) self.key = key self.model_name_pairs = model_name_pairs self.name = name @@ -235,3 +259,63 @@ class DistillationDKDLoss(DKDLoss): loss = super().forward(out1, out2, batch) loss_dict[f"{self.name}_{pair[0]}_{pair[1]}"] = loss return loss_dict + + +class DistillationMultiLabelLoss(MultiLabelLoss): + """ + DistillationMultiLabelLoss + """ + + def __init__(self, + model_names=[], + epsilon=None, + size_sum=False, + weight_ratio=False, + key=None, + name="loss_mll"): + super().__init__( + epsilon=epsilon, size_sum=size_sum, weight_ratio=weight_ratio) + assert isinstance(model_names, list) + self.key = key + self.model_names = model_names + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for name in self.model_names: + out = predicts[name] + if self.key is not None: + out = out[self.key] + loss = super().forward(out, batch) + for key in loss: + loss_dict["{}_{}".format(key, name)] = loss[key] + return loss_dict + + +class DistillationDISTLoss(DISTLoss): + """ + DistillationDISTLoss + """ + + def __init__(self, + model_name_pairs=[], + key=None, + beta=1.0, + gamma=1.0, + name="loss_dist"): + super().__init__(beta=beta, gamma=gamma) + self.key = key + self.model_name_pairs = model_name_pairs + self.name = name + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + loss = super().forward(out1, out2) + loss_dict[f"{self.name}_{pair[0]}_{pair[1]}"] = loss + return loss_dict diff --git a/ppcls/loss/dkdloss.py b/ppcls/loss/dkdloss.py index 9ce2c56d9334697d784ebc0371d4d59120790154..bf9224e31f2b70ba61e6479cfa82f828006ee750 100644 --- a/ppcls/loss/dkdloss.py +++ b/ppcls/loss/dkdloss.py @@ -10,13 +10,20 @@ class DKDLoss(nn.Layer): Code was heavily based on https://github.com/megvii-research/mdistiller """ - def __init__(self, temperature=1.0, alpha=1.0, beta=1.0): + def __init__(self, + temperature=1.0, + alpha=1.0, + beta=1.0, + use_target_as_gt=False): super().__init__() self.temperature = temperature self.alpha = alpha self.beta = beta + self.use_target_as_gt = use_target_as_gt - def forward(self, logits_student, logits_teacher, target): + def forward(self, logits_student, logits_teacher, target=None): + if target is None or self.use_target_as_gt: + target = logits_teacher.argmax(axis=-1) gt_mask = _get_gt_mask(logits_student, target) other_mask = 1 - gt_mask pred_student 
= F.softmax(logits_student / self.temperature, axis=1) diff --git a/ppcls/loss/dmlloss.py b/ppcls/loss/dmlloss.py index 48bf6c02429084badb95cd9d5806a2ee4c20452e..e8983ed08a9e26da6b4df983becd8a9cbdbfab39 100644 --- a/ppcls/loss/dmlloss.py +++ b/ppcls/loss/dmlloss.py @@ -16,13 +16,15 @@ import paddle import paddle.nn as nn import paddle.nn.functional as F +from ppcls.loss.multilabelloss import ratio2weight + class DMLLoss(nn.Layer): """ DMLLoss """ - def __init__(self, act="softmax", eps=1e-12): + def __init__(self, act="softmax", sum_across_class_dim=False, eps=1e-12): super().__init__() if act is not None: assert act in ["softmax", "sigmoid"] @@ -33,6 +35,7 @@ class DMLLoss(nn.Layer): else: self.act = None self.eps = eps + self.sum_across_class_dim = sum_across_class_dim def _kldiv(self, x, target): class_num = x.shape[-1] @@ -40,11 +43,20 @@ class DMLLoss(nn.Layer): (target + self.eps) / (x + self.eps)) * class_num return cost - def forward(self, x, target): + def forward(self, x, target, gt_label=None): if self.act is not None: x = self.act(x) target = self.act(target) loss = self._kldiv(x, target) + self._kldiv(target, x) loss = loss / 2 - loss = paddle.mean(loss) + + # for multi-label dml loss + if gt_label is not None: + gt_label, label_ratio = gt_label[:, 0, :], gt_label[:, 1, :] + targets_mask = paddle.cast(gt_label > 0.5, 'float32') + weight = ratio2weight(targets_mask, paddle.to_tensor(label_ratio)) + weight = weight * (gt_label > -1) + loss = loss * weight + + loss = loss.sum(1).mean() if self.sum_across_class_dim else loss.mean() return {"DMLLoss": loss} diff --git a/ppcls/loss/multilabelloss.py b/ppcls/loss/multilabelloss.py index d30d5b8d18083385567d0bcdffaa1fd2da4876f5..a88d8265a0c1fe9f21708ae27cabf6a5144f052d 100644 --- a/ppcls/loss/multilabelloss.py +++ b/ppcls/loss/multilabelloss.py @@ -3,16 +3,29 @@ import paddle.nn as nn import paddle.nn.functional as F +def ratio2weight(targets, ratio): + pos_weights = targets * (1. - ratio) + neg_weights = (1. 
- targets) * ratio + weights = paddle.exp(neg_weights + pos_weights) + + # for RAP dataloader, targets element may be 2, with or without smooth, some element must great than 1 + weights = weights - weights * (targets > 1) + + return weights + + class MultiLabelLoss(nn.Layer): """ Multi-label loss """ - def __init__(self, epsilon=None): + def __init__(self, epsilon=None, size_sum=False, weight_ratio=False): super().__init__() if epsilon is not None and (epsilon <= 0 or epsilon >= 1): epsilon = None self.epsilon = epsilon + self.weight_ratio = weight_ratio + self.size_sum = size_sum def _labelsmoothing(self, target, class_num): if target.ndim == 1 or target.shape[-1] != class_num: @@ -24,13 +37,21 @@ class MultiLabelLoss(nn.Layer): return soft_target def _binary_crossentropy(self, input, target, class_num): + if self.weight_ratio: + target, label_ratio = target[:, 0, :], target[:, 1, :] if self.epsilon is not None: target = self._labelsmoothing(target, class_num) - cost = F.binary_cross_entropy_with_logits( - logit=input, label=target) - else: - cost = F.binary_cross_entropy_with_logits( - logit=input, label=target) + cost = F.binary_cross_entropy_with_logits( + logit=input, label=target, reduction='none') + + if self.weight_ratio: + targets_mask = paddle.cast(target > 0.5, 'float32') + weight = ratio2weight(targets_mask, paddle.to_tensor(label_ratio)) + weight = weight * (target > -1) + cost = cost * weight + + if self.size_sum: + cost = cost.sum(1).mean() if self.size_sum else cost.mean() return cost diff --git a/ppcls/metric/__init__.py b/ppcls/metric/__init__.py index 94721235bca5ab4c27ddba36dd265a01cea003ad..1f49cc2d9c4e8a70287b416447c0d1d98a582113 100644 --- a/ppcls/metric/__init__.py +++ b/ppcls/metric/__init__.py @@ -12,17 +12,19 @@ #See the License for the specific language governing permissions and #limitations under the License. 
-from paddle import nn import copy from collections import OrderedDict +from .avg_metrics import AvgMetrics from .metrics import TopkAcc, mAP, mINP, Recallk, Precisionk from .metrics import DistillationTopkAcc from .metrics import GoogLeNetTopkAcc from .metrics import HammingDistance, AccuracyScore +from .metrics import ATTRMetric +from .metrics import TprAtFpr -class CombinedMetrics(nn.Layer): +class CombinedMetrics(AvgMetrics): def __init__(self, config_list): super().__init__() self.metric_func_list = [] @@ -38,13 +40,30 @@ class CombinedMetrics(nn.Layer): eval(metric_name)(**metric_params)) else: self.metric_func_list.append(eval(metric_name)()) + self.reset() - def __call__(self, *args, **kwargs): + def forward(self, *args, **kwargs): metric_dict = OrderedDict() for idx, metric_func in enumerate(self.metric_func_list): metric_dict.update(metric_func(*args, **kwargs)) return metric_dict + @property + def avg_info(self): + return ", ".join([metric.avg_info for metric in self.metric_func_list]) + + @property + def avg(self): + return self.metric_func_list[0].avg + + def attr_res(self): + return self.metric_func_list[0].attrmeter.res() + + def reset(self): + for metric in self.metric_func_list: + if hasattr(metric, "reset"): + metric.reset() + def build_metrics(config): metrics_list = CombinedMetrics(copy.deepcopy(config)) diff --git a/ppcls/metric/avg_metrics.py b/ppcls/metric/avg_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..6f4b62290b3d03879f8910b197b59b5448cb7247 --- /dev/null +++ b/ppcls/metric/avg_metrics.py @@ -0,0 +1,20 @@ +from paddle import nn + + +class AvgMetrics(nn.Layer): + def __init__(self): + super().__init__() + self.avg_meters = {} + + def reset(self): + self.avg_meters = {} + + @property + def avg(self): + if self.avg_meters: + for metric_key in self.avg_meters: + return self.avg_meters[metric_key].avg + + @property + def avg_info(self): + return ", ".join([self.avg_meters[key].avg_info for key in self.avg_meters]) diff --git a/ppcls/metric/metrics.py b/ppcls/metric/metrics.py index 03e742082b57439227746d21695379b498e7f1d8..0c803ccfdbb29216381625ea3df4a4540c7b56c0 100644 --- a/ppcls/metric/metrics.py +++ b/ppcls/metric/metrics.py @@ -22,36 +22,61 @@ from sklearn.metrics import accuracy_score as accuracy_metric from sklearn.metrics import multilabel_confusion_matrix from sklearn.preprocessing import binarize +from easydict import EasyDict -class TopkAcc(nn.Layer): +from ppcls.metric.avg_metrics import AvgMetrics +from ppcls.utils.misc import AverageMeter, AttrMeter +from ppcls.utils import logger + + +class TopkAcc(AvgMetrics): def __init__(self, topk=(1, 5)): super().__init__() assert isinstance(topk, (int, list, tuple)) if isinstance(topk, int): topk = [topk] self.topk = topk + self.reset() + + def reset(self): + self.avg_meters = { + f"top{k}": AverageMeter(f"top{k}") + for k in self.topk + } def forward(self, x, label): if isinstance(x, dict): x = x["logits"] + output_dims = x.shape[-1] + metric_dict = dict() - for k in self.topk: - metric_dict["top{}".format(k)] = paddle.metric.accuracy( - x, label, k=k) + for idx, k in enumerate(self.topk): + if output_dims < k: + msg = f"The output dims({output_dims}) is less than k({k}), and the argument {k} of Topk has been removed." 
+ logger.warning(msg) + self.avg_meters.pop(f"top{k}") + continue + metric_dict[f"top{k}"] = paddle.metric.accuracy(x, label, k=k) + self.avg_meters[f"top{k}"].update(metric_dict[f"top{k}"], + x.shape[0]) + + self.topk = list(filter(lambda k: k <= output_dims, self.topk)) + return metric_dict class mAP(nn.Layer): - def __init__(self): + def __init__(self, descending=True): super().__init__() + self.descending = descending def forward(self, similarities_matrix, query_img_id, gallery_img_id, keep_mask): metric_dict = dict() choosen_indices = paddle.argsort( - similarities_matrix, axis=1, descending=True) + similarities_matrix, axis=1, descending=self.descending) gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) gallery_labels_transpose = paddle.broadcast_to( gallery_labels_transpose, @@ -87,15 +112,16 @@ class mAP(nn.Layer): class mINP(nn.Layer): - def __init__(self): + def __init__(self, descending=True): super().__init__() + self.descending = descending def forward(self, similarities_matrix, query_img_id, gallery_img_id, keep_mask): metric_dict = dict() choosen_indices = paddle.argsort( - similarities_matrix, axis=1, descending=True) + similarities_matrix, axis=1, descending=self.descending) gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) gallery_labels_transpose = paddle.broadcast_to( gallery_labels_transpose, @@ -106,7 +132,7 @@ class mINP(nn.Layer): choosen_indices) equal_flag = paddle.equal(choosen_label, query_img_id) if keep_mask is not None: - keep_mask = paddle.index_sample( + keep_mask = paddle.indechmx_sample( keep_mask.astype('float32'), choosen_indices) equal_flag = paddle.logical_and(equal_flag, keep_mask.astype('bool')) @@ -129,13 +155,69 @@ class mINP(nn.Layer): return metric_dict +class TprAtFpr(nn.Layer): + def __init__(self, max_fpr=1 / 1000.): + super().__init__() + self.gt_pos_score_list = [] + self.gt_neg_score_list = [] + self.softmax = nn.Softmax(axis=-1) + self.max_fpr = max_fpr + self.max_tpr = 0. + + def forward(self, x, label): + if isinstance(x, dict): + x = x["logits"] + x = self.softmax(x) + for i, label_i in enumerate(label): + if label_i[0] == 0: + self.gt_neg_score_list.append(x[i][1].numpy()) + else: + self.gt_pos_score_list.append(x[i][1].numpy()) + return {} + + def reset(self): + self.gt_pos_score_list = [] + self.gt_neg_score_list = [] + self.max_tpr = 0. + + @property + def avg(self): + return self.max_tpr + + @property + def avg_info(self): + max_tpr = 0. + result = "" + gt_pos_score_list = np.array(self.gt_pos_score_list) + gt_neg_score_list = np.array(self.gt_neg_score_list) + for i in range(0, 10000): + threshold = i / 10000. 
+ if len(gt_pos_score_list) == 0: + continue + tpr = np.sum( + gt_pos_score_list > threshold) / len(gt_pos_score_list) + if len(gt_neg_score_list) == 0 and tpr > max_tpr: + max_tpr = tpr + result = "threshold: {}, fpr: {}, tpr: {:.5f}".format( + threshold, fpr, tpr) + fpr = np.sum( + gt_neg_score_list > threshold) / len(gt_neg_score_list) + if fpr <= self.max_fpr and tpr > max_tpr: + max_tpr = tpr + result = "threshold: {}, fpr: {}, tpr: {:.5f}".format( + threshold, fpr, tpr) + self.max_tpr = max_tpr + return result + + class Recallk(nn.Layer): - def __init__(self, topk=(1, 5)): + def __init__(self, topk=(1, 5), descending=True): super().__init__() assert isinstance(topk, (int, list, tuple)) if isinstance(topk, int): topk = [topk] self.topk = topk + self.descending = descending def forward(self, similarities_matrix, query_img_id, gallery_img_id, keep_mask): @@ -143,7 +225,7 @@ class Recallk(nn.Layer): #get cmc choosen_indices = paddle.argsort( - similarities_matrix, axis=1, descending=True) + similarities_matrix, axis=1, descending=self.descending) gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) gallery_labels_transpose = paddle.broadcast_to( gallery_labels_transpose, @@ -175,12 +257,13 @@ class Recallk(nn.Layer): class Precisionk(nn.Layer): - def __init__(self, topk=(1, 5)): + def __init__(self, topk=(1, 5), descending=True): super().__init__() assert isinstance(topk, (int, list, tuple)) if isinstance(topk, int): topk = [topk] self.topk = topk + self.descending = descending def forward(self, similarities_matrix, query_img_id, gallery_img_id, keep_mask): @@ -188,7 +271,7 @@ class Precisionk(nn.Layer): #get cmc choosen_indices = paddle.argsort( - similarities_matrix, axis=1, descending=True) + similarities_matrix, axis=1, descending=self.descending) gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0]) gallery_labels_transpose = paddle.broadcast_to( gallery_labels_transpose, @@ -241,20 +324,17 @@ class GoogLeNetTopkAcc(TopkAcc): return super().forward(x[0], label) -class MutiLabelMetric(object): - def __init__(self): - pass - - def _multi_hot_encode(self, logits, threshold=0.5): - return binarize(logits, threshold=threshold) +class MultiLabelMetric(AvgMetrics): + def __init__(self, bi_threshold=0.5): + super().__init__() + self.bi_threshold = bi_threshold - def __call__(self, output): - output = F.sigmoid(output) - preds = self._multi_hot_encode(logits=output.numpy(), threshold=0.5) - return preds + def _multi_hot_encode(self, output): + logits = F.sigmoid(output).numpy() + return binarize(logits, threshold=self.bi_threshold) -class HammingDistance(MutiLabelMetric): +class HammingDistance(MultiLabelMetric): """ Soft metric based label for multilabel classification Returns: @@ -263,16 +343,22 @@ class HammingDistance(MutiLabelMetric): def __init__(self): super().__init__() + self.reset() + + def reset(self): + self.avg_meters = {"HammingDistance": AverageMeter("HammingDistance")} - def __call__(self, output, target): - preds = super().__call__(output) + def forward(self, output, target): + preds = super()._multi_hot_encode(output) metric_dict = dict() metric_dict["HammingDistance"] = paddle.to_tensor( hamming_loss(target, preds)) + self.avg_meters["HammingDistance"].update( + metric_dict["HammingDistance"].numpy()[0], output.shape[0]) return metric_dict -class AccuracyScore(MutiLabelMetric): +class AccuracyScore(MultiLabelMetric): """ Hard metric for multilabel classification Args: @@ -288,9 +374,13 @@ class AccuracyScore(MutiLabelMetric): assert base in 
["sample", "label" ], 'must be one of ["sample", "label"]' self.base = base + self.reset() - def __call__(self, output, target): - preds = super().__call__(output) + def reset(self): + self.avg_meters = {"AccuracyScore": AverageMeter("AccuracyScore")} + + def forward(self, output, target): + preds = super()._multi_hot_encode(output) metric_dict = dict() if self.base == "sample": accuracy = accuracy_metric(target, preds) @@ -303,4 +393,67 @@ class AccuracyScore(MutiLabelMetric): accuracy = (sum(tps) + sum(tns)) / ( sum(tps) + sum(tns) + sum(fns) + sum(fps)) metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy) + self.avg_meters["AccuracyScore"].update( + metric_dict["AccuracyScore"].numpy()[0], output.shape[0]) + return metric_dict + + +def get_attr_metrics(gt_label, preds_probs, threshold): + """ + index: evaluated label index + adapted from "https://github.com/valencebond/Rethinking_of_PAR/blob/master/metrics/pedestrian_metrics.py" + """ + pred_label = (preds_probs > threshold).astype(int) + + eps = 1e-20 + result = EasyDict() + + has_fuyi = gt_label == -1 + pred_label[has_fuyi] = -1 + + ############################### + # label metrics + # TP + FN + result.gt_pos = np.sum((gt_label == 1), axis=0).astype(float) + # TN + FP + result.gt_neg = np.sum((gt_label == 0), axis=0).astype(float) + # TP + result.true_pos = np.sum((gt_label == 1) * (pred_label == 1), + axis=0).astype(float) + # TN + result.true_neg = np.sum((gt_label == 0) * (pred_label == 0), + axis=0).astype(float) + # FP + result.false_pos = np.sum(((gt_label == 0) * (pred_label == 1)), + axis=0).astype(float) + # FN + result.false_neg = np.sum(((gt_label == 1) * (pred_label == 0)), + axis=0).astype(float) + + ################ + # instance metrics + result.gt_pos_ins = np.sum((gt_label == 1), axis=1).astype(float) + result.true_pos_ins = np.sum((pred_label == 1), axis=1).astype(float) + # true positive + result.intersect_pos = np.sum((gt_label == 1) * (pred_label == 1), + axis=1).astype(float) + # IOU + result.union_pos = np.sum(((gt_label == 1) + (pred_label == 1)), + axis=1).astype(float) + + return result + + +class ATTRMetric(nn.Layer): + def __init__(self, threshold=0.5): + super().__init__() + self.threshold = threshold + + def reset(self): + self.attrmeter = AttrMeter(threshold=0.5) + + def forward(self, output, target): + metric_dict = get_attr_metrics(target[:, 0, :].numpy(), + output.numpy(), self.threshold) + self.attrmeter.update(metric_dict) return metric_dict diff --git a/ppcls/optimizer/optimizer.py b/ppcls/optimizer/optimizer.py index be6fa9f70ab8c50422e8f068b8e1c58a3b1c53e9..c0403cf95cdaf442b6fdaeea54d21a2382e3858b 100644 --- a/ppcls/optimizer/optimizer.py +++ b/ppcls/optimizer/optimizer.py @@ -16,9 +16,9 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -from paddle import optimizer as optim -import paddle +import inspect +from paddle import optimizer as optim from ppcls.utils import logger @@ -49,21 +49,32 @@ class SGD(object): learning_rate=0.001, weight_decay=None, grad_clip=None, + multi_precision=False, name=None): self.learning_rate = learning_rate self.weight_decay = weight_decay self.grad_clip = grad_clip + self.multi_precision = multi_precision self.name = name def __call__(self, model_list): # model_list is None in static graph parameters = sum([m.parameters() for m in model_list], []) if model_list else None - opt = optim.SGD(learning_rate=self.learning_rate, - parameters=parameters, - weight_decay=self.weight_decay, - 
@@ -242,8 +253,9 @@ class AdamW(object):
 
         if self.one_dim_param_no_weight_decay:
             self.no_weight_decay_param_name_list += [
-                p.name for model in model_list
-                for n, p in model.named_parameters() if len(p.shape) == 1
+                p.name
+                for model in model_list for n, p in model.named_parameters()
+                if len(p.shape) == 1
             ] if model_list else []
 
         opt = optim.AdamW(
diff --git a/ppcls/static/program.py b/ppcls/static/program.py
index 29107c9c1c1d8f571f0f8cf1cf0b7357ae3100ea..a6a80f13e07d6b040af17a16e6c0324492cfe174 100644
--- a/ppcls/static/program.py
+++ b/ppcls/static/program.py
@@ -371,6 +371,11 @@ def run(dataloader,
                     "Except RuntimeError when reading data from dataloader, try to read once again..."
                 )
                 continue
+            except IndexError:
+                logger.warning(
+                    "Caught IndexError when reading data from dataloader, trying to read once again..."
+                )
+                continue
             idx += 1
 
             # ignore the warmup iters
             if idx == 5:
@@ -439,8 +444,7 @@ def run(dataloader,
         logger.info("END {:s} {:s} {:s}".format(mode, end_str, ips_info))
     else:
         end_epoch_str = "END epoch:{:<3d}".format(epoch)
-        logger.info("{:s} {:s} {:s} {:s}".format(end_epoch_str, mode, end_str,
-                                                 ips_info))
+        logger.info("{:s} {:s} {:s}".format(end_epoch_str, mode, end_str))
 
     if use_dali:
         dataloader.reset()
diff --git a/ppcls/static/save_load.py b/ppcls/static/save_load.py
index 13badfddc87b111e51f1c3d52ff0c53f11fdbc7e..5d124fcf7472a7fadfe62360fd8fc5b9d8b8fcc2 100644
--- a/ppcls/static/save_load.py
+++ b/ppcls/static/save_load.py
@@ -62,8 +62,8 @@ def load_params(exe, prog, path, ignore_params=None):
     """
     Load model from the given path.
     Args:
-        exe (fluid.Executor): The fluid.Executor object.
-        prog (fluid.Program): load weight to which Program object.
+        exe (paddle.static.Executor): The paddle.static.Executor object.
+        prog (paddle.static.Program): the Program object to load weights into.
         path (string): URL string or local model path.
        ignore_params (list): variables to ignore when loading for finetuning.
It can be specified by finetune_exclude_pretrained_params diff --git a/ppcls/static/train.py b/ppcls/static/train.py index eb803970b23954324e7870133b61fe4bb7122a0c..86e832499345f581b1d1dc2c1ef40d6009491622 100644 --- a/ppcls/static/train.py +++ b/ppcls/static/train.py @@ -87,7 +87,7 @@ def main(args): 'FLAGS_max_inplace_grad_add': 8, } os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' - paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING) + paddle.set_flags(AMP_RELATED_FLAGS_SETTING) use_xpu = global_config.get("use_xpu", False) use_npu = global_config.get("use_npu", False) diff --git a/ppcls/utils/PULC_label_list/language_classification_label_list.txt b/ppcls/utils/PULC_label_list/language_classification_label_list.txt new file mode 100644 index 0000000000000000000000000000000000000000..8d9ee9dd86adfa7bdcced51220d48dde5511abc1 --- /dev/null +++ b/ppcls/utils/PULC_label_list/language_classification_label_list.txt @@ -0,0 +1,10 @@ +0 arabic +1 chinese_cht +2 cyrillic +3 devanagari +4 japan +5 ka +6 korean +7 ta +8 te +9 latin \ No newline at end of file diff --git a/ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt b/ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt new file mode 100644 index 0000000000000000000000000000000000000000..051944a929f323a3a25f1807ac0297170513484a --- /dev/null +++ b/ppcls/utils/PULC_label_list/text_image_orientation_label_list.txt @@ -0,0 +1,4 @@ +0 0 +1 90 +2 180 +3 270 diff --git a/ppcls/utils/PULC_label_list/textline_orientation_label_list.txt b/ppcls/utils/PULC_label_list/textline_orientation_label_list.txt new file mode 100644 index 0000000000000000000000000000000000000000..207b70c6b188d05ecb2a04c8f4946c993616e544 --- /dev/null +++ b/ppcls/utils/PULC_label_list/textline_orientation_label_list.txt @@ -0,0 +1,2 @@ +0 0_degree +1 180_degree diff --git a/ppcls/utils/PULC_label_list/traffic_sign_label_list.txt b/ppcls/utils/PULC_label_list/traffic_sign_label_list.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1e41d539d1af5611b2b047d664000b8f41afb15 --- /dev/null +++ b/ppcls/utils/PULC_label_list/traffic_sign_label_list.txt @@ -0,0 +1,232 @@ +0 pl80 +1 w9 +2 p6 +3 ph4.2 +4 i8 +5 w14 +6 w33 +7 pa13 +8 im +9 w58 +10 pl90 +11 il70 +12 p5 +13 pm55 +14 pl60 +15 ip +16 p11 +17 pdd +18 wc +19 i2r +20 w30 +21 pmr +22 p23 +23 pl15 +24 pm10 +25 pss +26 w1 +27 p4 +28 w38 +29 w50 +30 w34 +31 pw3.5 +32 iz +33 w39 +34 w11 +35 p1n +36 pr70 +37 pd +38 pnl +39 pg +40 ph5.3 +41 w66 +42 il80 +43 pb +44 pbm +45 pm5 +46 w24 +47 w67 +48 w49 +49 pm40 +50 ph4 +51 w45 +52 i4 +53 w37 +54 ph2.6 +55 pl70 +56 ph5.5 +57 i14 +58 i11 +59 p7 +60 p29 +61 pne +62 pr60 +63 pm13 +64 ph4.5 +65 p12 +66 p3 +67 w40 +68 pl5 +69 w13 +70 pr10 +71 p14 +72 i4l +73 pr30 +74 pw4.2 +75 w16 +76 p17 +77 ph3 +78 i9 +79 w15 +80 w35 +81 pa8 +82 pt +83 pr45 +84 w17 +85 pl30 +86 pcs +87 pctl +88 pr50 +89 ph4.4 +90 pm46 +91 pm35 +92 i15 +93 pa12 +94 pclr +95 i1 +96 pcd +97 pbp +98 pcr +99 w28 +100 ps +101 pm8 +102 w18 +103 w2 +104 w52 +105 ph2.9 +106 ph1.8 +107 pe +108 p20 +109 w36 +110 p10 +111 pn +112 pa14 +113 w54 +114 ph3.2 +115 p2 +116 ph2.5 +117 w62 +118 w55 +119 pw3 +120 pw4.5 +121 i12 +122 ph4.3 +123 phclr +124 i10 +125 pr5 +126 i13 +127 w10 +128 p26 +129 w26 +130 p8 +131 w5 +132 w42 +133 il50 +134 p13 +135 pr40 +136 p25 +137 w41 +138 pl20 +139 ph4.8 +140 pnlc +141 ph3.3 +142 w29 +143 ph2.1 +144 w53 +145 pm30 +146 p24 +147 p21 +148 pl40 +149 w27 +150 pmb +151 pc +152 i6 +153 pr20 +154 p18 +155 ph3.8 +156 pm50 +157 pm25 +158 i2 +159 w22 +160 w47 
+161 w56 +162 pl120 +163 ph2.8 +164 i7 +165 w12 +166 pm1.5 +167 pm2.5 +168 w32 +169 pm15 +170 ph5 +171 w19 +172 pw3.2 +173 pw2.5 +174 pl10 +175 il60 +176 w57 +177 w48 +178 w60 +179 pl100 +180 pr80 +181 p16 +182 pl110 +183 w59 +184 w64 +185 w20 +186 ph2 +187 p9 +188 il100 +189 w31 +190 w65 +191 ph2.4 +192 pr100 +193 p19 +194 ph3.5 +195 pa10 +196 pcl +197 pl35 +198 p15 +199 w7 +200 pa6 +201 phcs +202 w43 +203 p28 +204 w6 +205 w3 +206 w25 +207 pl25 +208 il110 +209 p1 +210 w46 +211 pn-2 +212 w51 +213 w44 +214 w63 +215 w23 +216 pm20 +217 w8 +218 pmblr +219 w4 +220 i5 +221 il90 +222 w21 +223 p27 +224 pl50 +225 pl65 +226 w61 +227 ph2.2 +228 pm2 +229 i3 +230 pa18 +231 pw4 diff --git a/ppcls/utils/download.py b/ppcls/utils/download.py index 9c4575048d3f579d93fcd315ac5193078e5f131f..3aeda0ced44f7cc5916b918e5980b86b977a23fe 100644 --- a/ppcls/utils/download.py +++ b/ppcls/utils/download.py @@ -77,21 +77,6 @@ def _map_path(url, root_dir): return osp.join(root_dir, fpath) -def _get_unique_endpoints(trainer_endpoints): - # Sorting is to avoid different environmental variables for each card - trainer_endpoints.sort() - ips = set() - unique_endpoints = set() - for endpoint in trainer_endpoints: - ip = endpoint.split(":")[0] - if ip in ips: - continue - ips.add(ip) - unique_endpoints.add(endpoint) - logger.info("unique_endpoints {}".format(unique_endpoints)) - return unique_endpoints - - def get_path_from_url(url, root_dir, md5sum=None, @@ -112,26 +97,26 @@ def get_path_from_url(url, str: a local path to save downloaded models & weights & datasets. """ - from paddle.fluid.dygraph.parallel import ParallelEnv + from paddle.distributed import ParallelEnv assert is_url(url), "downloading from {} not a url".format(url) # parse path after download to decompress under root_dir fullpath = _map_path(url, root_dir) # Mainly used to solve the problem of downloading data from different - # machines in the case of multiple machines. Different ips will download - # data, and the same ip will only download data once. - unique_endpoints = _get_unique_endpoints(ParallelEnv() - .trainer_endpoints[:]) + # machines in the case of multiple machines. Different nodes will download + # data, and the same node will only download data once. + rank_id_curr_node = int(os.environ.get("PADDLE_RANK_IN_NODE", 0)) + if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): logger.info("Found {}".format(fullpath)) else: - if ParallelEnv().current_endpoint in unique_endpoints: + if rank_id_curr_node == 0: fullpath = _download(url, root_dir, md5sum) else: while not os.path.exists(fullpath): time.sleep(1) - if ParallelEnv().current_endpoint in unique_endpoints: + if rank_id_curr_node == 0: if decompress and (tarfile.is_tarfile(fullpath) or zipfile.is_zipfile(fullpath)): fullpath = _decompress(fullpath) diff --git a/ppcls/utils/ema.py b/ppcls/utils/ema.py index b54cdb1b2030dc0a70394816a433e7e715e12996..8292781955210d68cea119b2fd887b534b3a6c04 100644 --- a/ppcls/utils/ema.py +++ b/ppcls/utils/ema.py @@ -1,10 +1,10 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
 # You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,52 +12,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from copy import deepcopy
+
 import paddle
-import numpy as np
 
 
 class ExponentialMovingAverage():
     """
     Exponential Moving Average
-    Code was heavily based on https://github.com/Wanger-SJTU/SegToolbox.Pytorch/blob/master/lib/utils/ema.py
+    Code was heavily based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/utils/model_ema.py
     """
 
-    def __init__(self, model, decay, thres_steps=True):
-        self._model = model
-        self._decay = decay
-        self._thres_steps = thres_steps
-        self._shadow = {}
-        self._backup = {}
-
-    def register(self):
-        self._update_step = 0
-        for name, param in self._model.named_parameters():
-            if param.stop_gradient is False:
-                self._shadow[name] = param.numpy().copy()
-
-    def update(self):
-        decay = min(self._decay, (1 + self._update_step) / (
-            10 + self._update_step)) if self._thres_steps else self._decay
-        for name, param in self._model.named_parameters():
-            if param.stop_gradient is False:
-                assert name in self._shadow
-                new_val = np.array(param.numpy().copy())
-                old_val = np.array(self._shadow[name])
-                new_average = decay * old_val + (1 - decay) * new_val
-                self._shadow[name] = new_average
-        self._update_step += 1
-        return decay
-
-    def apply(self):
-        for name, param in self._model.named_parameters():
-            if param.stop_gradient is False:
-                assert name in self._shadow
-                self._backup[name] = np.array(param.numpy().copy())
-                param.set_value(np.array(self._shadow[name]))
-
-    def restore(self):
-        for name, param in self._model.named_parameters():
-            if param.stop_gradient is False:
-                assert name in self._backup
-                param.set_value(self._backup[name])
-        self._backup = {}
+    def __init__(self, model, decay=0.9999):
+        super().__init__()
+        # make a copy of the model for accumulating moving average of weights
+        self.module = deepcopy(model)
+        self.module.eval()
+        self.decay = decay
+
+    @paddle.no_grad()
+    def _update(self, model, update_fn):
+        # walk the two state dicts in parallel and let update_fn combine them
+        for ema_v, model_v in zip(self.module.state_dict().values(),
+                                  model.state_dict().values()):
+            ema_v.set_value(update_fn(ema_v, model_v))
+
+    def update(self, model):
+        # shadow <- decay * shadow + (1 - decay) * live weights
+        self._update(
+            model,
+            update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
+
+    def set(self, model):
+        # hard copy: overwrite the shadow weights with the live ones
+        self._update(model, update_fn=lambda e, m: m)
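For orientation, a minimal sketch of how this rewritten EMA is typically driven from a training loop; the linear model and random batches are placeholders, not the PaddleClas engine:

import paddle

model = paddle.nn.Linear(10, 2)
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
ema = ExponentialMovingAverage(model, decay=0.9999)

for step in range(100):  # stand-in for iterating a real dataloader
    x = paddle.randn([8, 10])
    y = paddle.randint(0, 2, [8])
    loss = paddle.nn.functional.cross_entropy(model(x), y)
    loss.backward()
    opt.step()
    opt.clear_grad()
    ema.update(model)  # shadow weights drift toward the live weights

# evaluate with the smoothed copy instead of the raw weights
eval_logits = ema.module(paddle.randn([1, 10]))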
diff --git a/ppcls/utils/logger.py b/ppcls/utils/logger.py
index bc8de364091e9b56dafdcffa4475f7f225306e1b..5edca7a12a9dcff213d5663334e034ee42595d8f 100644
--- a/ppcls/utils/logger.py
+++ b/ppcls/utils/logger.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import datetime
+import logging
 import os
 import sys
 
-import logging
-import datetime
 import paddle.distributed as dist
 
 _logger = None
@@ -39,8 +39,12 @@ def init_logger(name='ppcls', log_file=None, log_level=logging.INFO):
         logging.Logger: The expected logger.
     """
     global _logger
-    assert _logger is None, "logger should not be initialized twice or more."
-    _logger = logging.getLogger(name)
+
+    # allow repeated initialization (e.g. from paddleclas.py and
+    # engine.engine) without attaching duplicate handlers
+    init_flag = False
+    if _logger is None:
+        _logger = logging.getLogger(name)
+        init_flag = True
 
     formatter = logging.Formatter(
         '[%(asctime)s] %(name)s %(levelname)s: %(message)s',
@@ -48,13 +52,32 @@ def init_logger(name='ppcls', log_file=None, log_level=logging.INFO):
 
     stream_handler = logging.StreamHandler(stream=sys.stdout)
     stream_handler.setFormatter(formatter)
-    _logger.addHandler(stream_handler)
+    stream_handler._name = 'stream_handler'
+
+    # add stream_handler only when _logger does not already contain one
+    for i, h in enumerate(_logger.handlers):
+        if h.get_name() == stream_handler.get_name():
+            break
+        if i == len(_logger.handlers) - 1:
+            _logger.addHandler(stream_handler)
+    if init_flag:
+        _logger.addHandler(stream_handler)
+
     if log_file is not None and dist.get_rank() == 0:
         log_file_folder = os.path.split(log_file)[0]
         os.makedirs(log_file_folder, exist_ok=True)
         file_handler = logging.FileHandler(log_file, 'a')
         file_handler.setFormatter(formatter)
-        _logger.addHandler(file_handler)
+        file_handler._name = 'file_handler'
+
+        # add file_handler only when _logger does not contain the same one
+        for i, h in enumerate(_logger.handlers):
+            if h.get_name() == file_handler.get_name() and \
+                    h.baseFilename == file_handler.baseFilename:
+                break
+            if i == len(_logger.handlers) - 1:
+                _logger.addHandler(file_handler)
+
     if dist.get_rank() == 0:
         _logger.setLevel(log_level)
     else:
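With the assert gone, calling init_logger more than once is intended to be harmless; a small sketch of the expected behaviour, assuming the module-level logging helpers that ppcls.utils.logger exposes and a writable ./output directory:

from ppcls.utils import logger

# The second call finds handlers with the same names already registered
# and adds nothing, so every record is emitted once rather than twice.
logger.init_logger(log_file="./output/train.log")
logger.init_logger(log_file="./output/train.log")
logger.info("printed once, even after two initializations")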
diff --git a/ppcls/utils/misc.py b/ppcls/utils/misc.py
index 08ab7b6f77cb85b0a822713ee7d573d561762d14..8015552437998264322661518ba3ce40c7cd7db5 100644
--- a/ppcls/utils/misc.py
+++ b/ppcls/utils/misc.py
@@ -12,6 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import numpy as np
+import paddle
+
 __all__ = ['AverageMeter']
 
@@ -42,6 +44,12 @@ class AverageMeter(object):
         self.count += n
         self.avg = self.sum / self.count
 
+    @property
+    def avg_info(self):
+        if isinstance(self.avg, paddle.Tensor):
+            self.avg = self.avg.numpy()[0]
+        return "{}: {:.5f}".format(self.name, self.avg)
+
     @property
     def total(self):
         return '{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'.format(
@@ -61,3 +69,87 @@ class AverageMeter(object):
     def value(self):
         return '{self.name}: {self.val:{self.fmt}}{self.postfix}'.format(
             self=self)
+
+
+class AttrMeter(object):
+    """
+    Computes and stores the average and current value
+    Code was based on https://github.com/pytorch/examples/blob/master/imagenet/main.py
+    """
+
+    def __init__(self, threshold=0.5):
+        self.threshold = threshold
+        self.reset()
+
+    def reset(self):
+        self.gt_pos = 0
+        self.gt_neg = 0
+        self.true_pos = 0
+        self.true_neg = 0
+        self.false_pos = 0
+        self.false_neg = 0
+
+        self.gt_pos_ins = []
+        self.true_pos_ins = []
+        self.intersect_pos = []
+        self.union_pos = []
+
+    def update(self, metric_dict):
+        self.gt_pos += metric_dict['gt_pos']
+        self.gt_neg += metric_dict['gt_neg']
+        self.true_pos += metric_dict['true_pos']
+        self.true_neg += metric_dict['true_neg']
+        self.false_pos += metric_dict['false_pos']
+        self.false_neg += metric_dict['false_neg']
+
+        self.gt_pos_ins += metric_dict['gt_pos_ins'].tolist()
+        self.true_pos_ins += metric_dict['true_pos_ins'].tolist()
+        self.intersect_pos += metric_dict['intersect_pos'].tolist()
+        self.union_pos += metric_dict['union_pos'].tolist()
+
+    def res(self):
+        eps = 1e-20
+        label_pos_recall = 1.0 * self.true_pos / (
+            self.gt_pos + eps)  # true positive
+        label_neg_recall = 1.0 * self.true_neg / (
+            self.gt_neg + eps)  # true negative
+        # mean accuracy
+        label_ma = (label_pos_recall + label_neg_recall) / 2
+
+        label_pos_recall = np.mean(label_pos_recall)
+        label_neg_recall = np.mean(label_neg_recall)
+        label_prec = (self.true_pos / (self.true_pos + self.false_pos + eps))
+        label_acc = (self.true_pos /
+                     (self.true_pos + self.false_pos + self.false_neg + eps))
+        label_f1 = np.mean(2 * label_prec * label_pos_recall /
+                           (label_prec + label_pos_recall + eps))
+
+        ma = (np.mean(label_ma))
+
+        self.gt_pos_ins = np.array(self.gt_pos_ins)
+        self.true_pos_ins = np.array(self.true_pos_ins)
+        self.intersect_pos = np.array(self.intersect_pos)
+        self.union_pos = np.array(self.union_pos)
+        # per-instance ratios first, then their means; f1 from the means
+        instance_acc = np.mean(self.intersect_pos / (self.union_pos + eps))
+        instance_prec = np.mean(self.intersect_pos /
+                                (self.true_pos_ins + eps))
+        instance_recall = np.mean(self.intersect_pos /
+                                  (self.gt_pos_ins + eps))
+        instance_f1 = 2 * instance_prec * instance_recall / (
+            instance_prec + instance_recall + eps)
+
+        res = [
+            ma, label_f1, label_pos_recall, label_neg_recall, instance_f1,
+            instance_acc, instance_prec, instance_recall
+        ]
+        return res
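The new avg_info property turns a meter into a ready-to-log string; a quick sketch on AverageMeter with made-up loss values:

from ppcls.utils.misc import AverageMeter

loss_meter = AverageMeter("CELoss")
for batch_loss in (0.9, 0.7, 0.5):       # stand-in for per-batch loss values
    loss_meter.update(batch_loss, n=32)  # n is the batch size
print(loss_meter.avg_info)               # -> "CELoss: 0.70000"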
diff --git a/ppcls/utils/save_load.py b/ppcls/utils/save_load.py
index 4e27f12c1d4830f2f16580bfa976cf3ace78d934..31323e9ae11b3245c898f412057a15fb56734b0a 100644
--- a/ppcls/utils/save_load.py
+++ b/ppcls/utils/save_load.py
@@ -42,6 +42,14 @@ def _mkdir_if_not_exist(path):
                 raise OSError('Failed to mkdir {}'.format(path))
 
 
+def _extract_student_weights(all_params, student_prefix="Student."):
+    s_params = {
+        key[len(student_prefix):]: all_params[key]
+        for key in all_params if key.startswith(student_prefix)
+    }
+    return s_params
+
+
 def load_dygraph_pretrain(model, path=None):
     if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
         raise ValueError("Model pretrain path {}.pdparams does not "
@@ -87,7 +95,11 @@ def load_distillation_model(model, pretrained_model):
                 pretrained_model))
 
 
-def init_model(config, net, optimizer=None, loss: paddle.nn.Layer=None):
+def init_model(config,
+               net,
+               optimizer=None,
+               loss: paddle.nn.Layer=None,
+               ema=None):
     """
     load model from checkpoint or pretrained_model
     """
@@ -105,7 +117,13 @@ def init_model(config, net, optimizer=None, loss: paddle.nn.Layer=None):
         net.set_state_dict(para_dict)
         loss.set_state_dict(para_dict)
         for i in range(len(optimizer)):
-            optimizer[i].set_state_dict(opti_dict)
+            optimizer[i].set_state_dict(opti_dict[i] if isinstance(
+                opti_dict, list) else opti_dict)
+        if ema is not None:
+            assert os.path.exists(checkpoints + ".ema.pdparams"), \
+                "Checkpoint {}.ema.pdparams does not exist.".format(checkpoints)
+            para_ema_dict = paddle.load(checkpoints + ".ema.pdparams")
+            ema.set_state_dict(para_ema_dict)
         logger.info("Finish load checkpoints from {}".format(checkpoints))
         return metric_dict
@@ -117,16 +135,18 @@ def init_model(config, net, optimizer=None, loss: paddle.nn.Layer=None):
     else:  # common load
         load_dygraph_pretrain(net, path=pretrained_model)
         logger.info("Finish load pretrained model from {}".format(
-            pretrained_model))
+                pretrained_model))
 
 
 def save_model(net,
                optimizer,
                metric_info,
                model_path,
+               ema=None,
                model_name="",
                prefix='ppcls',
-               loss: paddle.nn.Layer=None):
+               loss: paddle.nn.Layer=None,
+               save_student_model=False):
     """
     save model to the target path
     """
@@ -137,13 +157,22 @@ def save_model(net,
     model_path = os.path.join(model_path, prefix)
 
     params_state_dict = net.state_dict()
-    loss_state_dict = loss.state_dict()
-    keys_inter = set(params_state_dict.keys()) & set(loss_state_dict.keys())
-    assert len(keys_inter) == 0, \
-        f"keys in model and loss state_dict must be unique, but got intersection {keys_inter}"
-    params_state_dict.update(loss_state_dict)
+    if loss is not None:
+        loss_state_dict = loss.state_dict()
+        keys_inter = set(params_state_dict.keys()) & set(loss_state_dict.keys(
+        ))
+        assert len(keys_inter) == 0, \
+            f"keys in model and loss state_dict must be unique, but got intersection {keys_inter}"
+        params_state_dict.update(loss_state_dict)
+
+    if save_student_model:
+        s_params = _extract_student_weights(params_state_dict)
+        if len(s_params) > 0:
+            paddle.save(s_params, model_path + "_student.pdparams")
 
     paddle.save(params_state_dict, model_path + ".pdparams")
+    if ema is not None:
+        paddle.save(ema.state_dict(), model_path + ".ema.pdparams")
     paddle.save([opt.state_dict() for opt in optimizer],
                 model_path + ".pdopt")
     paddle.save(metric_info, model_path + ".pdstates")
     logger.info("Already save model in {}".format(model_path))
diff --git a/requirements.txt b/requirements.txt
index 79f548c2232dc0af5e77390afd23bfb938a2b103..4787aa84805e84c26a1030f773fbd89826e1aa56 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,8 +4,9 @@ opencv-python==4.4.0.46
 pillow
 tqdm
 PyYAML
-visualdl >= 2.2.0
+visualdl>=2.2.0
 scipy
-scikit-learn==0.23.2
+scikit-learn>=0.21.0
 gast==0.3.3
 faiss-cpu==1.7.1.post2
+easydict
diff --git a/setup.py b/setup.py
index 57045d31903917fdb8634887a1f6e7207871ead5..c935136f40b93ce32f1dce7f4be482e6dcb4bce9 100644
--- a/setup.py
+++ b/setup.py
@@ -38,13 +38,16 @@ setup(
     version='0.0.0',
     install_requires=requirements,
     license='Apache License 2.0',
-    description='Awesome Image Classification toolkits based on PaddlePaddle ',
+    description='A treasure chest for visual recognition powered by PaddlePaddle.',
     long_description=readme(),
     long_description_content_type='text/markdown',
     url='https://github.com/PaddlePaddle/PaddleClas',
     download_url='https://github.com/PaddlePaddle/PaddleClas.git',
     keywords=[
-        'A treasure chest for image classification powered by PaddlePaddle.'
+        'image-classification', 'image-recognition', 'pretrained-models',
+        'knowledge-distillation', 'product-recognition', 'autoaugment',
+        'cutmix', 'randaugment', 'gridmask', 'deit', 'repvgg',
+        'swin-transformer', 'image-retrieval-system'
     ],
     classifiers=[
         'Intended Audience :: Developers',
diff --git a/test_tipc/README.md b/test_tipc/README.md
index 4869f6e11ddc78b7c05c7805bfb25ba7e41b683d..1bb7e573750c37b87a96bbd5ff7fb05415580b49 100644
--- a/test_tipc/README.md
+++ b/test_tipc/README.md
@@ -35,18 +35,23 @@
 │   ├── MobileNetV3    # test config directory for the MobileNetV3 series
 │   │   ├── MobileNetV3_large_x1_0_train_infer_python.txt    # basic training & inference config
 │   │   ├── MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt    # multi-node multi-GPU training & inference config
-│   │   └── MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt    # mixed-precision training & inference config
-│   └── ResNet    # test config directory for the ResNet series
-│       ├── ResNet50_vd_train_infer_python.txt    # basic training & inference config
-│       ├── ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt    # multi-node multi-GPU training & inference config
-│       └── ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt    # mixed-precision training & inference config
-|   ......
+│   │   ├── MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt    # mixed-precision training & inference config
+│   │   ├── MobileNetV3_large_x1_0_paddle2onnx_infer_python.txt    # Paddle2ONNX inference test config
+│   │   └── ......
+│   ├── ResNet    # test config directory for the ResNet series
+│   │   ├── ResNet50_vd_train_infer_python.txt    # basic training & inference config
+│   │   ├── ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt    # multi-node multi-GPU training & inference config
+│   │   ├── ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt    # mixed-precision training & inference config
+│   │   ├── ResNet50_vd_paddle2onnx_infer_python.txt    # Paddle2ONNX inference test config
+│   │   └── ......
+│   └── ......
 ├── docs
 │   ├── guide.png
 │   └── test.png
 ├── prepare.sh    # downloads the data and models needed to run the test_*.sh scripts
 ├── README.md     # usage documentation
 ├── results       # pre-saved prediction results, used for accuracy comparison with actual predictions
+├── test_paddle2onnx.sh    # main script for testing Paddle2ONNX inference
 └── test_train_inference_python.sh    # main script for testing Python training and inference
 ```
 
@@ -99,10 +104,15 @@ bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/MobileNetV3/Mo
 
 ## 4 Running the tests
 
 The tests cover training options such as mixed precision, pruning and quantization, as well as inference options such as MKL-DNN and TensorRT; follow the links below for more details and tutorials:
 
 - [test_train_inference_python usage](docs/test_train_inference_python.md): tests basic Python-based model training, evaluation and inference, including pruning, quantization and distillation.
+- [test_train_pact_inference_python usage](docs/test_train_pact_inference_python.md): tests PACT quantization-aware training (Python).
+- [test_train_ptq_inference_python usage](docs/test_train_ptq_inference_python.md): tests KL post-training (offline) quantization (Python).
 - [test_inference_cpp usage](docs/test_inference_cpp.md): tests C++-based model inference.
 - [test_serving usage](docs/test_serving.md): tests service deployment with Paddle Serving.
 - [test_lite_arm_cpu_cpp usage](docs/test_lite_arm_cpu_cpp.md): tests C++ inference deployment on ARM CPU with Paddle-Lite.
 - [test_paddle2onnx usage](docs/test_paddle2onnx.md): tests Paddle2ONNX model conversion and verifies its correctness.
+- [test_serving_infer_python usage](docs/test_serving_infer_python.md): tests Python serving.
+- [test_serving_infer_cpp usage](docs/test_serving_infer_cpp.md): tests C++ serving.
+- [test_train_fleet_inference_python usage](./docs/test_train_fleet_inference_python.md): tests basic multi-node multi-GPU training and inference (Python).
diff --git a/test_tipc/benchmark_train.sh b/test_tipc/benchmark_train.sh
index 793b89476fb829034687b442c517546f5d8a4cfc..5c4d4112ad691569914ccf9b84480db9b76fa024 100644
--- a/test_tipc/benchmark_train.sh
+++ b/test_tipc/benchmark_train.sh
@@ -225,7 +225,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         else
             IFS=";"
             unset_env=`unset CUDA_VISIBLE_DEVICES`
@@ -261,7 +261,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         fi
     done
done
diff --git a/test_tipc/common_func.sh b/test_tipc/common_func.sh
index 63fa1014487ce43405896ddf97f5d2aae0344489..e0459366ed7d86d239624dc47937d91cc7704894 100644
--- a/test_tipc/common_func.sh
+++ b/test_tipc/common_func.sh
@@ -38,6 +38,7 @@ function func_set_params(){
 
 function func_parser_params(){
     strs=$1
+    MODE=$2
     IFS=":"
     array=(${strs})
     key=${array[0]}
@@ -64,10 +65,10 @@ function status_check(){
     last_status=$1   # the exit code
     run_command=$2
     run_log=$3
+    model_name=$4
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${run_command}!  \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}!  \033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${run_command}!  \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}!
\033[0m" | tee -a ${run_log} fi } - diff --git a/test_tipc/config/AlexNet/AlexNet_train_amp_infer_python.txt b/test_tipc/config/AlexNet/AlexNet_train_amp_infer_python.txt deleted file mode 100644 index c0cf046186576e77776c71699611bb541ddf1aaf..0000000000000000000000000000000000000000 --- a/test_tipc/config/AlexNet/AlexNet_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:AlexNet -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/AlexNet/AlexNet_train_infer_python.txt b/test_tipc/config/AlexNet/AlexNet_train_infer_python.txt deleted file mode 100644 index 56e19cd417967b51f14f20fad304a0be39f9ec7b..0000000000000000000000000000000000000000 --- a/test_tipc/config/AlexNet/AlexNet_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:AlexNet -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -null:null -## 
-===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/CSPNet/CSPDarkNet53_train_amp_infer_python.txt b/test_tipc/config/CSPNet/CSPDarkNet53_train_amp_infer_python.txt deleted file mode 100644 index 534cb09f64aa706c7b1d6a225fac7fcf74d8fdef..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSPNet/CSPDarkNet53_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:CSPDarkNet53 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSPDarkNet53_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=288 -o PreProcess.transform_ops.1.CropImage.size=256 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/CSPNet/CSPDarkNet53_train_infer_python.txt 
b/test_tipc/config/CSPNet/CSPDarkNet53_train_infer_python.txt deleted file mode 100644 index bf82ef353d6446d639341d928df204c820fd9e3f..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSPNet/CSPDarkNet53_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:CSPDarkNet53 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSPDarkNet53_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=288 -o PreProcess.transform_ops.1.CropImage.size=256 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,256,256]}] \ No newline at end of file diff --git a/test_tipc/config/CSWinTransformer/CSWinTransformer_base_224_train_infer_python.txt b/test_tipc/config/CSWinTransformer/CSWinTransformer_base_224_train_infer_python.txt deleted file mode 100644 index 3de6add85078c13a323e0f62a8d805880f189c6c..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSWinTransformer/CSWinTransformer_base_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:CSWinTransformer_base_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null 
-distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_base_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/CSWinTransformer/CSWinTransformer_base_384_train_infer_python.txt b/test_tipc/config/CSWinTransformer/CSWinTransformer_base_384_train_infer_python.txt deleted file mode 100644 index 3a913e253ee31e04d5c3d86b3be34aeb0e777949..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSWinTransformer/CSWinTransformer_base_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:CSWinTransformer_base_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_base_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o 
Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] diff --git a/test_tipc/config/CSWinTransformer/CSWinTransformer_large_224_train_infer_python.txt b/test_tipc/config/CSWinTransformer/CSWinTransformer_large_224_train_infer_python.txt deleted file mode 100644 index d376d4cba38f52522fb7ecc9a93e38bef1b7b710..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSWinTransformer/CSWinTransformer_large_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:CSWinTransformer_large_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/CSWinTransformer/CSWinTransformer_large_384_train_infer_python.txt b/test_tipc/config/CSWinTransformer/CSWinTransformer_large_384_train_infer_python.txt deleted file mode 100644 index 781fefe58dbe18b1bbf89026b59b95c995ecf0c5..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSWinTransformer/CSWinTransformer_large_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:CSWinTransformer_large_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:4 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] diff --git a/test_tipc/config/CSWinTransformer/CSWinTransformer_small_224_train_infer_python.txt b/test_tipc/config/CSWinTransformer/CSWinTransformer_small_224_train_infer_python.txt deleted file mode 100644 index 8057bb84b766e0f618b93a85f3f69f82fc8fa712..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSWinTransformer/CSWinTransformer_small_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:CSWinTransformer_small_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c 
ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_small_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/CSWinTransformer/CSWinTransformer_tiny_224_train_infer_python.txt b/test_tipc/config/CSWinTransformer/CSWinTransformer_tiny_224_train_infer_python.txt deleted file mode 100644 index 55afdbc96cd7a6a82b5350a7a2b3e4da804d6ba0..0000000000000000000000000000000000000000 --- a/test_tipc/config/CSWinTransformer/CSWinTransformer_tiny_224_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:CSWinTransformer_tiny_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_tiny_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:128 -fp_items:fp32 -epoch:1 
---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/config/ConvNeXt/ConvNeXt_tiny_train_infer_python.txt b/test_tipc/config/ConvNeXt/ConvNeXt_tiny_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..11b4007ef9fbaef563b028a10bf9f42eb2581f94
--- /dev/null
+++ b/test_tipc/config/ConvNeXt/ConvNeXt_tiny_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ConvNeXt_tiny
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ConvNeXt/ConvNeXt_tiny.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+inference_dir:null
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DLA/DLA102_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA102_train_amp_infer_python.txt
deleted file mode 100644
index bdc23ffa35fde038393a59546e3607573ee27ff8..0000000000000000000000000000000000000000
--- a/test_tipc/config/DLA/DLA102_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DLA102
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -o
diff --git a/test_tipc/config/DLA/DLA102_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA102_train_amp_infer_python.txt deleted file mode 100644 index bdc23ffa35fde038393a59546e3607573ee27ff8..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA102_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA102 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA102_train_infer_python.txt b/test_tipc/config/DLA/DLA102_train_infer_python.txt deleted file mode 100644 index e4c0edf0b229edb205531ce0c1437d6e43fdf322..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA102_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA102 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA102x2_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA102x2_train_amp_infer_python.txt deleted file mode 100644 index 2552690f31120311edb7dee5c4b334bc131d01d2..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA102x2_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA102x2 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x2_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA102x2_train_infer_python.txt b/test_tipc/config/DLA/DLA102x2_train_infer_python.txt deleted file mode 100644 index cdbc367c8dbd543e60e964fc748fa98fb6861dd2..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA102x2_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA102x2 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -null:null -## 
-===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x2_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA102x_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA102x_train_amp_infer_python.txt deleted file mode 100644 index f1c19957bde3d68021ae83056a7ef5d429bdade4..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA102x_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA102x -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA102x_train_infer_python.txt b/test_tipc/config/DLA/DLA102x_train_infer_python.txt deleted file mode 100644 index f0316cb2eb6416b3a17112e23a74bd35694836f4..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA102x_train_infer_python.txt +++ 
/dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA102x -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA169_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA169_train_amp_infer_python.txt deleted file mode 100644 index 8ba21d09a359c1e86faf4557762edeaf4d911274..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA169_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA169 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -quant_export:null -fpgm_export:null -distill_export:null 
-kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA169_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA169_train_infer_python.txt b/test_tipc/config/DLA/DLA169_train_infer_python.txt deleted file mode 100644 index f3f23a8ec1db951aa73a1604fd3456120c30bdf0..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA169_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA169 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA169_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA34_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA34_train_amp_infer_python.txt deleted file mode 100644 index 5e63e494dcd60d83d21bc9f2050fe3e7f2d545de..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA34_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA34 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o 
Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA34_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA34_train_infer_python.txt b/test_tipc/config/DLA/DLA34_train_infer_python.txt deleted file mode 100644 index 8360f0c05e59784322abdefaffb186ecacd39c08..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA34_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA34 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA34_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o 
Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA46_c_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA46_c_train_amp_infer_python.txt deleted file mode 100644 index a9ec71f73cb1e5c60d5b4a77da4be21f8f69e305..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA46_c_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA46_c -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46_c_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA46_c_train_infer_python.txt b/test_tipc/config/DLA/DLA46_c_train_infer_python.txt deleted file mode 100644 index 1ca4c2725899c3b25bce3e5ddad7e88891da64e2..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA46_c_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA46_c -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null 
-## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46_c_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA46x_c_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA46x_c_train_amp_infer_python.txt deleted file mode 100644 index 0325c27822419e75cb4c882749383528ac76bed9..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA46x_c_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA46x_c -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46x_c_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA46x_c_train_infer_python.txt b/test_tipc/config/DLA/DLA46x_c_train_infer_python.txt deleted file mode 
100644 index 3a3687523070e0b04be664305936904e5429011c..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA46x_c_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA46x_c -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46x_c_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA60_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA60_train_amp_infer_python.txt deleted file mode 100644 index ac7b55c22f25c1a14c7f5a61350b86611c776922..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA60_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA60 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o 
Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA60_train_infer_python.txt b/test_tipc/config/DLA/DLA60_train_infer_python.txt deleted file mode 100644 index 8bb2fbd982fa700a62370c79004a1160686e36c4..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA60_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA60 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA60x_c_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA60x_c_train_amp_infer_python.txt deleted file mode 100644 index 7b66c715e6c95cdd30223f4efb61a846837d1e25..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA60x_c_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA60x_c -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA60x_c_train_infer_python.txt b/test_tipc/config/DLA/DLA60x_c_train_infer_python.txt deleted file mode 100644 index a9e3a1cfaa5382ec47d387174095f394cf2fcea6..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA60x_c_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA60x_c -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o 
Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DLA/DLA60x_train_amp_infer_python.txt b/test_tipc/config/DLA/DLA60x_train_amp_infer_python.txt deleted file mode 100644 index 4f775c9431e67ac4494f20771f8ba7800c327c3f..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA60x_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DLA60x -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DLA/DLA60x_train_infer_python.txt b/test_tipc/config/DLA/DLA60x_train_infer_python.txt deleted file mode 100644 index 274f5a4253e013a8b67671fac01e636ba83af97f..0000000000000000000000000000000000000000 --- a/test_tipc/config/DLA/DLA60x_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DLA60x -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o 
DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DPN/DPN107_train_amp_infer_python.txt b/test_tipc/config/DPN/DPN107_train_amp_infer_python.txt deleted file mode 100644 index 41102a8b14fe0c3e9c2923e88f19e193f68df6c2..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN107_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DPN107 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null
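Every *_train_amp_infer_python.txt removed in this change, DPN107's included just above, differs from its *_train_infer_python.txt counterpart only in the trainer lines: `trainer:amp_train` plus the extra overrides `-o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2`. As a rough illustration of what those three overrides correspond to in a training step, here is a sketch against Paddle's public paddle.amp API; a GPU build of PaddlePaddle is assumed, and the toy model stands in for the real PaddleClas trainer:

```python
import paddle

# Toy stand-in for the real PaddleClas model/optimizer pair.
model = paddle.nn.Sequential(paddle.nn.Flatten(),
                             paddle.nn.Linear(3 * 32 * 32, 10))
opt = paddle.optimizer.Momentum(learning_rate=0.1,
                                parameters=model.parameters())

# -o AMP.level=O2: cast the model/optimizer for fp16-dominant training.
model, opt = paddle.amp.decorate(models=model, optimizers=opt, level="O2")

# -o AMP.scale_loss=128 and -o AMP.use_dynamic_loss_scaling=True.
scaler = paddle.amp.GradScaler(init_loss_scaling=128,
                               use_dynamic_loss_scaling=True)

x = paddle.randn([8, 3, 32, 32])
with paddle.amp.auto_cast(level="O2"):
    loss = model(x).mean()

scaled = scaler.scale(loss)   # scale the loss so fp16 grads don't underflow
scaled.backward()
scaler.minimize(opt, scaled)  # unscale grads, skip the step on inf/nan
opt.clear_grad()
```

With level O2 the model runs almost entirely in float16, so the loss is multiplied by the scale factor (initially 128 here) before backward to keep small gradients from underflowing, and dynamic loss scaling grows or shrinks that factor automatically as overflows are detected.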
diff --git a/test_tipc/config/DPN/DPN107_train_infer_python.txt b/test_tipc/config/DPN/DPN107_train_infer_python.txt deleted file mode 100644 index 48b5cdbbb134d662fa40ad4e1c20236220786252..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN107_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DPN107 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DPN/DPN131_train_amp_infer_python.txt b/test_tipc/config/DPN/DPN131_train_amp_infer_python.txt deleted file mode 100644 index 1423a3e324df402f5e4cebb584d4616dd047e2ae..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN131_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DPN131 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c
ppcls/configs/ImageNet/DPN/DPN131.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DPN/DPN131_train_infer_python.txt b/test_tipc/config/DPN/DPN131_train_infer_python.txt deleted file mode 100644 index bbb7899c1c8c340d93654d6424b01bbde4a1ee21..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN131_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DPN131 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DPN/DPN68_train_amp_infer_python.txt b/test_tipc/config/DPN/DPN68_train_amp_infer_python.txt deleted file mode 100644 index 1a8ae3d6168c6f34023c266ef6d413bc9db683c6..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN68_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ 
-===========================train_params=========================== -model_name:DPN68 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN68_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DPN/DPN68_train_infer_python.txt b/test_tipc/config/DPN/DPN68_train_infer_python.txt deleted file mode 100644 index 91a24e8d3cf3f170d88743eb58cd7794d2c07452..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN68_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DPN68 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN68_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle 
-inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/DPN/DPN92_train_amp_infer_python.txt b/test_tipc/config/DPN/DPN92_train_amp_infer_python.txt deleted file mode 100644 index bccd5ffd03d320ca11d4105ce2055a0bf9177565..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN92_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:DPN92 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN92_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/DPN/DPN92_train_infer_python.txt b/test_tipc/config/DPN/DPN92_train_infer_python.txt deleted file mode 100644 index d160a970134085c6e40ff60eedd43bb2dca4dbbc..0000000000000000000000000000000000000000 --- a/test_tipc/config/DPN/DPN92_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:DPN92 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train 
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN92_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DPN/DPN98_train_amp_infer_python.txt b/test_tipc/config/DPN/DPN98_train_amp_infer_python.txt
deleted file mode 100644
index 0e488de5e3d8502b53b8b0148e7769a21df0b4ad..0000000000000000000000000000000000000000
--- a/test_tipc/config/DPN/DPN98_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DPN98
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN98_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DPN/DPN98_train_infer_python.txt b/test_tipc/config/DPN/DPN98_train_infer_python.txt
deleted file mode 100644
index 3dc6f133c8be5a63047cfa65e7ba13e9a4051eeb..0000000000000000000000000000000000000000
--- a/test_tipc/config/DPN/DPN98_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DPN98
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN98_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DarkNet/DarkNet53_train_amp_infer_python.txt b/test_tipc/config/DarkNet/DarkNet53_train_amp_infer_python.txt
deleted file mode 100644
index 907c8369786e4cfe29526d2a2418bf055e20a6c8..0000000000000000000000000000000000000000
--- a/test_tipc/config/DarkNet/DarkNet53_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DarkNet53
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DarkNet53_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DarkNet/DarkNet53_train_infer_python.txt b/test_tipc/config/DarkNet/DarkNet53_train_infer_python.txt
deleted file mode 100644
index 31e38095e8880c1cd6689622b6d0a2aa24fac080..0000000000000000000000000000000000000000
--- a/test_tipc/config/DarkNet/DarkNet53_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DarkNet53
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DarkNet53_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,256,256]}]
\ No newline at end of file
diff --git a/test_tipc/config/DeiT/DeiT_base_patch16_224_train_amp_infer_python.txt b/test_tipc/config/DeiT/DeiT_base_patch16_224_train_amp_infer_python.txt
deleted file mode 100644
index 1c52e7bb689f81d40b90c2f3c274fbe6f538c4e3..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_base_patch16_224_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_base_patch16_224
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_224_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DeiT/DeiT_base_patch16_224_train_infer_python.txt b/test_tipc/config/DeiT/DeiT_base_patch16_224_train_infer_python.txt
deleted file mode 100644
index 3a436eb2e5cd1a87efbbc3d46bdc4ee129c0693f..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_base_patch16_224_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_base_patch16_224
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_224_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DeiT/DeiT_base_patch16_384_train_amp_infer_python.txt b/test_tipc/config/DeiT/DeiT_base_patch16_384_train_amp_infer_python.txt
deleted file mode 100644
index 964e8eadbcda29764c28d646a0f1984cf85f8195..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_base_patch16_384_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_base_patch16_384
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_384_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DeiT/DeiT_base_patch16_384_train_infer_python.txt b/test_tipc/config/DeiT/DeiT_base_patch16_384_train_infer_python.txt
deleted file mode 100644
index c1ee64b3bfe6482e72f10803e8fd955ae9c19896..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_base_patch16_384_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_base_patch16_384
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_384_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,384,384]}]
\ No newline at end of file
diff --git a/test_tipc/config/DeiT/DeiT_small_patch16_224_train_amp_infer_python.txt b/test_tipc/config/DeiT/DeiT_small_patch16_224_train_amp_infer_python.txt
deleted file mode 100644
index 32b63d774f447937d0863d7b8ffb85b290e15a2e..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_small_patch16_224_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_small_patch16_224
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_patch16_224_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DeiT/DeiT_small_patch16_224_train_infer_python.txt b/test_tipc/config/DeiT/DeiT_small_patch16_224_train_infer_python.txt
deleted file mode 100644
index 37bdad714cd9bfead43c0f984b4cb2c36f206d4e..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_small_patch16_224_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_small_patch16_224
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_patch16_224_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DeiT/DeiT_tiny_patch16_224_train_amp_infer_python.txt b/test_tipc/config/DeiT/DeiT_tiny_patch16_224_train_amp_infer_python.txt
deleted file mode 100644
index 769f086946997e78bd0f8b0bfd83512d855c6ed3..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_tiny_patch16_224_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_tiny_patch16_224
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_patch16_224_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DeiT/DeiT_tiny_patch16_224_train_infer_python.txt b/test_tipc/config/DeiT/DeiT_tiny_patch16_224_train_infer_python.txt
deleted file mode 100644
index e262b871651de68e80e37264a334040e923521bd..0000000000000000000000000000000000000000
--- a/test_tipc/config/DeiT/DeiT_tiny_patch16_224_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DeiT_tiny_patch16_224
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_patch16_224_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DenseNet/DenseNet121_train_amp_infer_python.txt b/test_tipc/config/DenseNet/DenseNet121_train_amp_infer_python.txt
deleted file mode 100644
index e66857b9d6f5527efa213faf2489c45d52a7eef6..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet121_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet121
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DenseNet/DenseNet121_train_infer_python.txt b/test_tipc/config/DenseNet/DenseNet121_train_infer_python.txt
deleted file mode 100644
index 17a24fbaf250ce7d9988f15c48847efdc663606d..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet121_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet121
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DenseNet/DenseNet161_train_amp_infer_python.txt b/test_tipc/config/DenseNet/DenseNet161_train_amp_infer_python.txt
deleted file mode 100644
index fa1a0a7e33973a575d379ec0b02b9fd6c7eef217..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet161_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet161
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DenseNet/DenseNet161_train_infer_python.txt b/test_tipc/config/DenseNet/DenseNet161_train_infer_python.txt
deleted file mode 100644
index 3d2c8519e61a5c6f5e2c819d50d53012e097f01d..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet161_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet161
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DenseNet/DenseNet169_train_amp_infer_python.txt b/test_tipc/config/DenseNet/DenseNet169_train_amp_infer_python.txt
deleted file mode 100644
index 55c4d6ec86540c175832c61af03006c5813ee184..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet169_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet169
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DenseNet/DenseNet169_train_infer_python.txt b/test_tipc/config/DenseNet/DenseNet169_train_infer_python.txt
deleted file mode 100644
index f962f66e4fbe71f7fcdfa355b8af101a328c83d9..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet169_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet169
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DenseNet/DenseNet201_train_amp_infer_python.txt b/test_tipc/config/DenseNet/DenseNet201_train_amp_infer_python.txt
deleted file mode 100644
index 58250ce15a9227e9adcb8c7f2faaf89ded4b7062..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet201_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet201
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DenseNet/DenseNet201_train_infer_python.txt b/test_tipc/config/DenseNet/DenseNet201_train_infer_python.txt
deleted file mode 100644
index b1bc6ad7b844826b5752ab64ee70010ce15e8d03..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet201_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet201
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/DenseNet/DenseNet264_train_amp_infer_python.txt b/test_tipc/config/DenseNet/DenseNet264_train_amp_infer_python.txt
deleted file mode 100644
index 26138926314320c8c9b5ee3831a06e036142d689..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet264_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet264
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/DenseNet/DenseNet264_train_infer_python.txt b/test_tipc/config/DenseNet/DenseNet264_train_infer_python.txt
deleted file mode 100644
index ff95b9d8e046d098c6f36ff54596e181b3f6e934..0000000000000000000000000000000000000000
--- a/test_tipc/config/DenseNet/DenseNet264_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DenseNet264
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/Distillation/resnet34_distill_resnet18_dkd_train_amp_infer_python.txt b/test_tipc/config/Distillation/resnet34_distill_resnet18_dkd_train_amp_infer_python.txt
deleted file mode 100644
index ab94039471ba1abaf035600a3351656a3f4e0f25..0000000000000000000000000000000000000000
--- a/test_tipc/config/Distillation/resnet34_distill_resnet18_dkd_train_amp_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DistillationModel
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/config/Distillation/resnet34_distill_resnet18_dkd_train_infer_python.txt b/test_tipc/config/Distillation/resnet34_distill_resnet18_dkd_train_infer_python.txt
deleted file mode 100644
index 4b216a9f0c2fa15811617575aefa772aa7dab313..0000000000000000000000000000000000000000
--- a/test_tipc/config/Distillation/resnet34_distill_resnet18_dkd_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:DistillationModel
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/config/ESNet/ESNet_x0_25_train_amp_infer_python.txt b/test_tipc/config/ESNet/ESNet_x0_25_train_amp_infer_python.txt
deleted file mode 100644
index 7e9abcf47db884882e23008c9909dd3f334f4773..0000000000000000000000000000000000000000
--- a/test_tipc/config/ESNet/ESNet_x0_25_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ESNet_x0_25
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_25_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ESNet/ESNet_x0_25_train_infer_python.txt b/test_tipc/config/ESNet/ESNet_x0_25_train_infer_python.txt
deleted file mode 100644 index df1f896da971a61dd3743830a3b25dfd50b087d2..0000000000000000000000000000000000000000 --- a/test_tipc/config/ESNet/ESNet_x0_25_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ESNet_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ESNet/ESNet_x0_5_train_amp_infer_python.txt b/test_tipc/config/ESNet/ESNet_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 7cd576052d101c9dbc065cfe6dae6a290a5f1490..0000000000000000000000000000000000000000 --- a/test_tipc/config/ESNet/ESNet_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ESNet_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -null:null -## 
-===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ESNet/ESNet_x0_5_train_infer_python.txt b/test_tipc/config/ESNet/ESNet_x0_5_train_infer_python.txt deleted file mode 100644 index b8aa0bc75ed239c7f106607f9f67b4fe6891035e..0000000000000000000000000000000000000000 --- a/test_tipc/config/ESNet/ESNet_x0_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ESNet_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ESNet/ESNet_x0_75_train_amp_infer_python.txt b/test_tipc/config/ESNet/ESNet_x0_75_train_amp_infer_python.txt deleted file mode 100644 index aa715a5d0165b93e5d4808353ac4a75cda974064..0000000000000000000000000000000000000000 --- 
a/test_tipc/config/ESNet/ESNet_x0_75_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ESNet_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ESNet/ESNet_x0_75_train_infer_python.txt b/test_tipc/config/ESNet/ESNet_x0_75_train_infer_python.txt deleted file mode 100644 index 713086fe05ea65d33d53f700d363349d96e365c7..0000000000000000000000000000000000000000 --- a/test_tipc/config/ESNet/ESNet_x0_75_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ESNet_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null 
-kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ESNet/ESNet_x1_0_train_amp_infer_python.txt b/test_tipc/config/ESNet/ESNet_x1_0_train_amp_infer_python.txt deleted file mode 100644 index 8ce5c4ca603eca59b8f4d7df3deb2fa31ca0ad3f..0000000000000000000000000000000000000000 --- a/test_tipc/config/ESNet/ESNet_x1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ESNet_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ESNet/ESNet_x1_0_train_infer_python.txt b/test_tipc/config/ESNet/ESNet_x1_0_train_infer_python.txt deleted file mode 100644 index 31e11cff36f60eea65c97e7431b6f39a60d9695e..0000000000000000000000000000000000000000 --- a/test_tipc/config/ESNet/ESNet_x1_0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ESNet_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o 
Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB0_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB0_train_amp_infer_python.txt deleted file mode 100644 index effea01493bf4ee309feea2ab71947117621df74..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB0_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB0_train_infer_python.txt deleted file mode 100644 index fe9660749f84853ac5e9111427ca43cfe8740bac..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB1_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB1_train_amp_infer_python.txt deleted file mode 100644 index ba5fa4d2067ddd98058ecc0b762292bf27219cb7..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB1_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB1 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB1_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=272 -o PreProcess.transform_ops.1.CropImage.size=240 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB1_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB1_train_infer_python.txt deleted file mode 100644 index 302b52b5175a5f012c4d0c4c0ca993e0a26baca0..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB1_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB1 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB1_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=272 -o PreProcess.transform_ops.1.CropImage.size=240 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,240,240]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB2_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB2_train_amp_infer_python.txt deleted file mode 100644 index a045ea9fc1ff69b039b78f92dd448e3fc53927cb..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB2_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB2 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB2_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=260 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB2_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB2_train_infer_python.txt deleted file mode 100644 index 116f9ca21a6294815a6dd1dd57406e43fc029e95..0000000000000000000000000000000000000000 --- 
a/test_tipc/config/EfficientNet/EfficientNetB2_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB2 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB2_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=260 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,260,260]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB3_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB3_train_amp_infer_python.txt deleted file mode 100644 index dc3a03297aee923f0476c6193b14482da89292bb..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB3_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c 
ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=332 -o PreProcess.transform_ops.1.CropImage.size=300 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB3_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB3_train_infer_python.txt deleted file mode 100644 index 3e5369b56b3ba2d0707120b1f07e71262cf49cc5..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB3_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=332 -o PreProcess.transform_ops.1.CropImage.size=300 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== 
-random_infer_input:[{float32,[3,300,300]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB4_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB4_train_amp_infer_python.txt deleted file mode 100644 index 1c63f3b742101adff3015614782065a3a6f64094..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB4_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB4 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB4_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=412 -o PreProcess.transform_ops.1.CropImage.size=380 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB4_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB4_train_infer_python.txt deleted file mode 100644 index 51b439e459d57800904b6aadc7f7c44ec37038c4..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB4_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB4 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o 
DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB4_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=412 -o PreProcess.transform_ops.1.CropImage.size=380 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,380,380]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB5_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB5_train_amp_infer_python.txt deleted file mode 100644 index 60d0eeac26259f0a986332dfb19b849a25d2ae08..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=488 -o PreProcess.transform_ops.1.CropImage.size=456 --o 
Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB5_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB5_train_infer_python.txt deleted file mode 100644 index 77ac03863e989f359b1f3f5fa063da4ed721a4f1..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=488 -o PreProcess.transform_ops.1.CropImage.size=456 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,456,456]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB6_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB6_train_amp_infer_python.txt deleted file mode 100644 index ce07cce099b56e668e54672a4783ebaa8041fe25..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB6_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB6 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o 
Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB6_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=560 -o PreProcess.transform_ops.1.CropImage.size=528 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB6_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB6_train_infer_python.txt deleted file mode 100644 index 73c88c4dfbb975dd5585e207b6d9c09017de94b2..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB6_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB6 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB6_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle 
-inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=560 -o PreProcess.transform_ops.1.CropImage.size=528 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,528,528]}] \ No newline at end of file diff --git a/test_tipc/config/EfficientNet/EfficientNetB7_train_amp_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB7_train_amp_infer_python.txt deleted file mode 100644 index d8eff7add7411b105b0996c5d91166e4689f6ef4..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB7_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:EfficientNetB7 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=632 -o PreProcess.transform_ops.1.CropImage.size=600 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/EfficientNet/EfficientNetB7_train_infer_python.txt b/test_tipc/config/EfficientNet/EfficientNetB7_train_infer_python.txt deleted file mode 100644 index dd70d66b2a0a48ce1f33104bbcfef383b82a3258..0000000000000000000000000000000000000000 --- a/test_tipc/config/EfficientNet/EfficientNetB7_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== 
-model_name:EfficientNetB7 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=632 -o PreProcess.transform_ops.1.CropImage.size=600 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,600,600]}] \ No newline at end of file diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_amp_infer_python.txt b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_amp_infer_python.txt deleted file mode 100644 index 2420e06e03fa4abe66e678a7a06940a391a590a9..0000000000000000000000000000000000000000 --- a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:GeneralRecognition_PPLCNet_x2_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml 
-null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_rec.py -c configs/inference_rec.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.rec_inference_model_dir:../inference --o Global.infer_imgs:../dataset/Aliproduct/demo_test/ --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt deleted file mode 100644 index cdcb9f737d8c1f6be29ff38f9be187d6130a1c7b..0000000000000000000000000000000000000000 --- a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:GeneralRecognition_PPLCNet_x2_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_rec.py -c configs/inference_rec.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.rec_inference_model_dir:../inference --o Global.infer_imgs:../dataset/Aliproduct/demo_test/ --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff 
--git a/test_tipc/config/GhostNet/GhostNet_x0_5_train_amp_infer_python.txt b/test_tipc/config/GhostNet/GhostNet_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 0861f243efb67150067ff694cc7be2fe11ef080c..0000000000000000000000000000000000000000 --- a/test_tipc/config/GhostNet/GhostNet_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:GhostNet_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/GhostNet/GhostNet_x0_5_train_infer_python.txt b/test_tipc/config/GhostNet/GhostNet_x0_5_train_infer_python.txt deleted file mode 100644 index 5207cdbee52e14412523e6124d94b070b41f8036..0000000000000000000000000000000000000000 --- a/test_tipc/config/GhostNet/GhostNet_x0_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:GhostNet_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml 
-null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/GhostNet/GhostNet_x1_0_train_amp_infer_python.txt b/test_tipc/config/GhostNet/GhostNet_x1_0_train_amp_infer_python.txt deleted file mode 100644 index 3132830ca27910adce290b231257ebd4e1506f79..0000000000000000000000000000000000000000 --- a/test_tipc/config/GhostNet/GhostNet_x1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:GhostNet_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/GhostNet/GhostNet_x1_0_train_infer_python.txt b/test_tipc/config/GhostNet/GhostNet_x1_0_train_infer_python.txt deleted file mode 100644 index 
600f238902823521de8ab7816d4fab92d25be9c5..0000000000000000000000000000000000000000 --- a/test_tipc/config/GhostNet/GhostNet_x1_0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:GhostNet_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/GhostNet/GhostNet_x1_3_train_amp_infer_python.txt b/test_tipc/config/GhostNet/GhostNet_x1_3_train_amp_infer_python.txt deleted file mode 100644 index e5ad93bc1fc381add7241943744f731f6cb75bf4..0000000000000000000000000000000000000000 --- a/test_tipc/config/GhostNet/GhostNet_x1_3_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:GhostNet_x1_3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -null:null -## 
-===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/GhostNet/GhostNet_x1_3_train_infer_python.txt b/test_tipc/config/GhostNet/GhostNet_x1_3_train_infer_python.txt deleted file mode 100644 index f859260b60b4d830ff1c88b468e736b3074fc945..0000000000000000000000000000000000000000 --- a/test_tipc/config/GhostNet/GhostNet_x1_3_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:GhostNet_x1_3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HRNet/HRNet_W18_C_train_amp_infer_python.txt b/test_tipc/config/HRNet/HRNet_W18_C_train_amp_infer_python.txt deleted file mode 100644 index 72014b2741530ca16aa508bc61e1f6d6253f0d20..0000000000000000000000000000000000000000 --- 
a/test_tipc/config/HRNet/HRNet_W18_C_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W18_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HRNet/HRNet_W18_C_train_infer_python.txt b/test_tipc/config/HRNet/HRNet_W18_C_train_infer_python.txt deleted file mode 100644 index a98161cd26ad75b589778c79144428c946066703..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W18_C_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W18_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -quant_export:null -fpgm_export:null -distill_export:null 
-kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HRNet/HRNet_W30_C_train_amp_infer_python.txt b/test_tipc/config/HRNet/HRNet_W30_C_train_amp_infer_python.txt deleted file mode 100644 index 0a38ecc036167f4d35592d78ae9b51c63e096cce..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W30_C_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W30_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W30_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HRNet/HRNet_W30_C_train_infer_python.txt b/test_tipc/config/HRNet/HRNet_W30_C_train_infer_python.txt deleted file mode 100644 index 6376de04fd54dd337d018e26b58da0e8a67302a2..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W30_C_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W30_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu 
--o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W30_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HRNet/HRNet_W32_C_train_amp_infer_python.txt b/test_tipc/config/HRNet/HRNet_W32_C_train_amp_infer_python.txt deleted file mode 100644 index bcdd374618799e8f7cc519f77f40b36ad3a57e51..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W32_C_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W32_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W32_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HRNet/HRNet_W32_C_train_infer_python.txt b/test_tipc/config/HRNet/HRNet_W32_C_train_infer_python.txt deleted file mode 100644 index e4aef325e33d326fbbffdcfe5ad5ea2d072f0e62..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W32_C_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W32_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W32_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HRNet/HRNet_W40_C_train_amp_infer_python.txt b/test_tipc/config/HRNet/HRNet_W40_C_train_amp_infer_python.txt deleted file mode 100644 index 3825c10286046c98b06aadc17d1581e293967d59..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W40_C_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W40_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o 
Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W40_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HRNet/HRNet_W40_C_train_infer_python.txt b/test_tipc/config/HRNet/HRNet_W40_C_train_infer_python.txt deleted file mode 100644 index 9ad57d9b0be126b3e8bf538a7c9dfd43b59fe109..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W40_C_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W40_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W40_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o 
Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HRNet/HRNet_W44_C_train_amp_infer_python.txt b/test_tipc/config/HRNet/HRNet_W44_C_train_amp_infer_python.txt deleted file mode 100644 index f7bdb981a921b7971969b25e28c2ab56535546f6..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W44_C_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W44_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W44_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HRNet/HRNet_W44_C_train_infer_python.txt b/test_tipc/config/HRNet/HRNet_W44_C_train_infer_python.txt deleted file mode 100644 index 14710ed597eb732b1ef9e8ebb5a0bcaf3ba8577b..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W44_C_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W44_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train 
-norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W44_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HRNet/HRNet_W48_C_train_amp_infer_python.txt b/test_tipc/config/HRNet/HRNet_W48_C_train_amp_infer_python.txt deleted file mode 100644 index 56cc623d73b357c873d3f75535cfc62882030db5..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W48_C_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W48_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o 
Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HRNet/HRNet_W48_C_train_infer_python.txt b/test_tipc/config/HRNet/HRNet_W48_C_train_infer_python.txt deleted file mode 100644 index f85ce5a5e2893c1b4c41e2370802dc3d36a55bd7..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W48_C_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W48_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:64|128 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HRNet/HRNet_W64_C_train_amp_infer_python.txt b/test_tipc/config/HRNet/HRNet_W64_C_train_amp_infer_python.txt deleted file mode 100644 index daf5d0ab12fc1c0fd3a29a677dd178e5a16c1432..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W64_C_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W64_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HRNet/HRNet_W64_C_train_infer_python.txt b/test_tipc/config/HRNet/HRNet_W64_C_train_infer_python.txt deleted file mode 100644 index 75b19de8f3bd36fe21db217a211d066f3d8516aa..0000000000000000000000000000000000000000 --- a/test_tipc/config/HRNet/HRNet_W64_C_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HRNet_W64_C -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c 
configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HarDNet/HarDNet39_ds_train_amp_infer_python.txt b/test_tipc/config/HarDNet/HarDNet39_ds_train_amp_infer_python.txt deleted file mode 100644 index 122dafe1a411b40210ae74864aee4e1e2b1f0f3e..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet39_ds_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet39_ds -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet39_ds_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HarDNet/HarDNet39_ds_train_infer_python.txt b/test_tipc/config/HarDNet/HarDNet39_ds_train_infer_python.txt deleted file mode 100644 index 1d2b13eccfbbbca76550115748c5213dac8b32e4..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet39_ds_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet39_ds -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest 
-train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet39_ds_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HarDNet/HarDNet68_ds_train_amp_infer_python.txt b/test_tipc/config/HarDNet/HarDNet68_ds_train_amp_infer_python.txt deleted file mode 100644 index 8d27c482d6496430a0f982e0031cb503036caab4..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet68_ds_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet68_ds -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_ds_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False 
--o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HarDNet/HarDNet68_ds_train_infer_python.txt b/test_tipc/config/HarDNet/HarDNet68_ds_train_infer_python.txt deleted file mode 100644 index 0e6172a0cd768060e15bd06052a2b9fe22a5ea8e..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet68_ds_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet68_ds -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_ds_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HarDNet/HarDNet68_train_amp_infer_python.txt b/test_tipc/config/HarDNet/HarDNet68_train_amp_infer_python.txt deleted file mode 100644 index 88c82cc41e2c09ee53ca6999a8c14a64125db6e3..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet68_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet68 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -o 
Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HarDNet/HarDNet68_train_infer_python.txt b/test_tipc/config/HarDNet/HarDNet68_train_infer_python.txt deleted file mode 100644 index 7211e8b738aaf5c636606528a98dcee404ee1056..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet68_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet68 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null 
-===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/HarDNet/HarDNet85_train_amp_infer_python.txt b/test_tipc/config/HarDNet/HarDNet85_train_amp_infer_python.txt deleted file mode 100644 index 5c3a73dfa2eacb6b35c2dd296dc3126d921acd75..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet85_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet85 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/HarDNet/HarDNet85_train_infer_python.txt b/test_tipc/config/HarDNet/HarDNet85_train_infer_python.txt deleted file mode 100644 index 5371fe9321135a7c953e81a3d9f42d1c456c86b4..0000000000000000000000000000000000000000 --- a/test_tipc/config/HarDNet/HarDNet85_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:HarDNet85 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
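These *_train_infer_python.txt files all share one fixed grammar: "===name===" banners open each section, "##" lines separate blocks, and every remaining line is a colon-separated key:value pair, with "null" marking an unused slot. A minimal parsing sketch under that assumption (a hypothetical helper, not part of the actual test_tipc tooling):

# Minimal sketch: read a TIPC-style config into a dict.
# Assumes the grammar above: "key:value" per line, "===...===" section
# banners, "##" separators, and "null" for unused slots. Repeated
# "null:null" placeholder lines simply overwrite one another here.
def parse_tipc_config(path):
    params = {}
    with open(path) as f:
        for raw in f:
            line = raw.strip()
            if not line or line == "##" or line.startswith("==="):
                continue
            key, _, value = line.partition(":")
            params[key] = None if value == "null" else value
    return params

Applied to one of the files above, params["norm_train"] would hold the full tools/train.py command line and params["gpu_list"] the "0|0,1" sweep spec.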
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Inception/GoogLeNet_train_amp_infer_python.txt b/test_tipc/config/Inception/GoogLeNet_train_amp_infer_python.txt deleted file mode 100644 index de01f30b696586f484f7b1921536a3189eb588ae..0000000000000000000000000000000000000000 --- a/test_tipc/config/Inception/GoogLeNet_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:GoogLeNet -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Inception/GoogLeNet_train_infer_python.txt 
b/test_tipc/config/Inception/GoogLeNet_train_infer_python.txt deleted file mode 100644 index 0ce44c563ea95ddf41630394d39fd8f42e7a6924..0000000000000000000000000000000000000000 --- a/test_tipc/config/Inception/GoogLeNet_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:GoogLeNet -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Inception/InceptionV3_train_amp_infer_python.txt b/test_tipc/config/Inception/InceptionV3_train_amp_infer_python.txt deleted file mode 100644 index ce9c3477bef4287532d6169b6e1f8780160d38ae..0000000000000000000000000000000000000000 --- a/test_tipc/config/Inception/InceptionV3_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:InceptionV3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c 
ppcls/configs/ImageNet/Inception/InceptionV3.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Inception/InceptionV3_train_infer_python.txt b/test_tipc/config/Inception/InceptionV3_train_infer_python.txt deleted file mode 100644 index 64e857a5878ca79763e749562a988325ecbf5569..0000000000000000000000000000000000000000 --- a/test_tipc/config/Inception/InceptionV3_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:InceptionV3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git 
a/test_tipc/config/Inception/InceptionV4_train_amp_infer_python.txt b/test_tipc/config/Inception/InceptionV4_train_amp_infer_python.txt deleted file mode 100644 index 853c1b7da138ecdbea17492e2e5d83515f9887fc..0000000000000000000000000000000000000000 --- a/test_tipc/config/Inception/InceptionV4_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:InceptionV4 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Inception/InceptionV4_train_infer_python.txt b/test_tipc/config/Inception/InceptionV4_train_infer_python.txt deleted file mode 100644 index 75826301ef65fbe0fc4dcf3fe2407deb4881e099..0000000000000000000000000000000000000000 --- a/test_tipc/config/Inception/InceptionV4_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:InceptionV4 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
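Unlike the 224x224 models, the Inception entries append -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 to the predict_cls.py command, i.e. resize the short side to 320 and center-crop 299x299. A small illustrative sketch of that convention (not the actual preprocessing ops shipped in deploy/python):

from PIL import Image

# Sketch: resize the short side to resize_short, then center-crop
# crop x crop, mirroring the -o PreProcess overrides above.
def resize_short_center_crop(img, resize_short=320, crop=299):
    w, h = img.size
    scale = resize_short / min(w, h)
    img = img.resize((round(w * scale), round(h * scale)), Image.BILINEAR)
    w, h = img.size
    left, top = (w - crop) // 2, (h - crop) // 2
    return img.crop((left, top, left + crop, top + crop))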
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/config/LeViT/LeViT_128S_train_amp_infer_python.txt b/test_tipc/config/LeViT/LeViT_128S_train_amp_infer_python.txt deleted file mode 100644 index d973ec5174940d92c629bf5be745a74c4d73a3d7..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_128S_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_128S -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128S_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True 
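The |-separated values in the inference block above (use_gpu:True|False, cpu_num_threads:1|6, batch_size:1|16, and so on) enumerate the settings the test harness iterates over, one inference run per combination. A rough itertools sketch of that expansion (illustrative only; the real test_tipc driver is a shell script):

import itertools

# Sketch: expand "|"-separated option values into one "-o" override
# string per combination, as the sweep above implies.
def expand_runs(options):
    keys = list(options)
    choices = [str(options[k]).split("|") for k in keys]
    for combo in itertools.product(*choices):
        yield " ".join("-o {}={}".format(k, v) for k, v in zip(keys, combo))

for overrides in expand_runs({"Global.use_gpu": "True|False",
                              "Global.batch_size": "1|16"}):
    print("python python/predict_cls.py -c configs/inference_cls.yaml", overrides)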
-null:null -null:null diff --git a/test_tipc/config/LeViT/LeViT_128S_train_infer_python.txt b/test_tipc/config/LeViT/LeViT_128S_train_infer_python.txt deleted file mode 100644 index 0b3daa1e6f05c8dc086cd09397f0311054f61fee..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_128S_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_128S -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128S_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/LeViT/LeViT_128_train_amp_infer_python.txt b/test_tipc/config/LeViT/LeViT_128_train_amp_infer_python.txt deleted file mode 100644 index fb884b24b97cec70913380aee85f28e0c1d66f5f..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_128_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_128 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/LeViT/LeViT_128_train_infer_python.txt b/test_tipc/config/LeViT/LeViT_128_train_infer_python.txt deleted file mode 100644 index 515b109d74b2e4000dd58d57945cd4e2a4024955..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_128_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_128 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/LeViT/LeViT_192_train_amp_infer_python.txt b/test_tipc/config/LeViT/LeViT_192_train_amp_infer_python.txt deleted file mode 100644 index 
0ecb7e096a8983b714d489a15176df92ebd2b1fa..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_192_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_192 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_192_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/LeViT/LeViT_192_train_infer_python.txt b/test_tipc/config/LeViT/LeViT_192_train_infer_python.txt deleted file mode 100644 index 107977ceadc93a3b40d25b01cbe311871b24675d..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_192_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_192 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -quant_export:null 
-fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_192_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/LeViT/LeViT_256_train_amp_infer_python.txt b/test_tipc/config/LeViT/LeViT_256_train_amp_infer_python.txt deleted file mode 100644 index dbea83a768e91287e98bfdf6125bf1d786f0ded5..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_256_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_256 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_256_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/LeViT/LeViT_256_train_infer_python.txt b/test_tipc/config/LeViT/LeViT_256_train_infer_python.txt deleted file mode 100644 index 18bee51e16a6e86aa09a994abece11cbb462cf00..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_256_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_256 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o 
Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_256_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/LeViT/LeViT_384_train_amp_infer_python.txt b/test_tipc/config/LeViT/LeViT_384_train_amp_infer_python.txt deleted file mode 100644 index c7a243e79f07e62cc3c39680398889101bd2cb1d..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_384_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/LeViT/LeViT_384_train_infer_python.txt b/test_tipc/config/LeViT/LeViT_384_train_infer_python.txt deleted file mode 100644 index 968b767dbd504917ae4ef93aad914bffe8e95606..0000000000000000000000000000000000000000 --- a/test_tipc/config/LeViT/LeViT_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:LeViT_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MixNet/MixNet_L_train_amp_infer_python.txt b/test_tipc/config/MixNet/MixNet_L_train_amp_infer_python.txt deleted file mode 100644 index a70575726f4690e0abe8c698c7674918a3c64dc4..0000000000000000000000000000000000000000 --- a/test_tipc/config/MixNet/MixNet_L_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MixNet_L 
-python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MixNet/MixNet_L_train_infer_python.txt b/test_tipc/config/MixNet/MixNet_L_train_infer_python.txt deleted file mode 100644 index 26e2ba0ce3f32c932196a7c338b00da29f9b8265..0000000000000000000000000000000000000000 --- a/test_tipc/config/MixNet/MixNet_L_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MixNet_L -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle 
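The amp_train entries above differ from norm_train only in the trailing -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 overrides, which correspond to PaddlePaddle's mixed-precision training API. A simplified sketch of that correspondence (assuming paddle 2.x amp semantics; PaddleClas wires this up through its config system rather than inline code, and resnet18 here is just a stand-in):

import paddle

# Sketch only: what the AMP.* overrides roughly map to.
#   AMP.level=O2                 -> paddle.amp.decorate / auto_cast(level="O2")
#   AMP.scale_loss=128           -> GradScaler(init_loss_scaling=128)
#   AMP.use_dynamic_loss_scaling -> GradScaler(use_dynamic_loss_scaling=True)
model = paddle.vision.models.resnet18()  # stand-in for the model under test
opt = paddle.optimizer.Momentum(learning_rate=0.1, parameters=model.parameters())
model, opt = paddle.amp.decorate(models=model, optimizers=opt, level="O2")
scaler = paddle.amp.GradScaler(init_loss_scaling=128,
                               use_dynamic_loss_scaling=True)

x = paddle.randn([8, 3, 224, 224])      # batch_size 8, as in the configs
y = paddle.randint(0, 1000, [8])
with paddle.amp.auto_cast(level="O2"):  # fp16 compute where safe
    loss = paddle.nn.CrossEntropyLoss()(model(x), y)
scaled = scaler.scale(loss)             # multiply loss by the scale factor
scaled.backward()
scaler.minimize(opt, scaled)            # unscale, step, update the scale
opt.clear_grad()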
-inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MixNet/MixNet_M_train_amp_infer_python.txt b/test_tipc/config/MixNet/MixNet_M_train_amp_infer_python.txt deleted file mode 100644 index 66c5e83d7d6a64281e3c8c48f3a75641dd2ba826..0000000000000000000000000000000000000000 --- a/test_tipc/config/MixNet/MixNet_M_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MixNet_M -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MixNet/MixNet_M_train_infer_python.txt b/test_tipc/config/MixNet/MixNet_M_train_infer_python.txt deleted file mode 100644 index c214185178d2ce17df7068cf53a73948de9f42df..0000000000000000000000000000000000000000 --- a/test_tipc/config/MixNet/MixNet_M_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MixNet_M -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest 
-train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MixNet/MixNet_S_train_amp_infer_python.txt b/test_tipc/config/MixNet/MixNet_S_train_amp_infer_python.txt deleted file mode 100644 index ac256e161cf75a7dc1d3978af7ba2a64fd1c2cc9..0000000000000000000000000000000000000000 --- a/test_tipc/config/MixNet/MixNet_S_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MixNet_S -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_S_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o 
Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MixNet/MixNet_S_train_infer_python.txt b/test_tipc/config/MixNet/MixNet_S_train_infer_python.txt deleted file mode 100644 index 77cb00769b7c15b4ef3473b63a19e584490dea3c..0000000000000000000000000000000000000000 --- a/test_tipc/config/MixNet/MixNet_S_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MixNet_S -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_S_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_train_amp_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_train_amp_infer_python.txt deleted file mode 100644 index db98a5f6ed047a7236d0a7a1bbfa91e0f5093995..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -o Global.seed=1234 -o 
DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_train_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_train_infer_python.txt deleted file mode 100644 index ac12953ddb4070265b7ebe714182c79fe070138a..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o 
Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:64|128 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_x0_25_train_amp_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_x0_25_train_amp_infer_python.txt deleted file mode 100644 index 68106952dfb6612916963c5c1f6d7c932eeffe23..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_x0_25_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_x0_25_train_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_x0_25_train_infer_python.txt deleted file mode 100644 index 5dd3399159d6377cab5e38ce101e1f128cac0ca2..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_x0_25_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_x0_5_train_amp_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 0552171764cdaeb448267cf5c95988354d0691bc..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null 
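Many entries in these deleted TIPC configs pack several test variants into a single line separated by `|`, e.g. `gpu_list:0|0,1`, `-o Global.batch_size:1|16`, or the mode-qualified `-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120`. A minimal sketch of expanding such a line into the concrete values a test loop would sweep; `parse_tipc_line` is a hypothetical helper, not the actual `test_tipc` parser (which is implemented in shell):

```python
# Hypothetical helper: expand one TIPC config line "key:v1|v2|..." into
# (key, [v1, v2, ...]). Not the real test_tipc parser.
def parse_tipc_line(line: str):
    key, _, value = line.partition(":")
    variants = [] if value == "null" else value.split("|")  # "null" = unused slot
    return key.strip(), variants

for raw in ("gpu_list:0|0,1",
            "-o Global.batch_size:1|16",
            "-o Global.use_tensorrt:True|False"):
    print(parse_tipc_line(raw))
# ('gpu_list', ['0', '0,1'])
# ('-o Global.batch_size', ['1', '16'])
# ('-o Global.use_tensorrt', ['True', 'False'])
```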
-kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_x0_5_train_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_x0_5_train_infer_python.txt deleted file mode 100644 index fa8e48f42357930f214c23190f947d5de2df5f61..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_x0_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_x0_75_train_amp_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_x0_75_train_amp_infer_python.txt deleted file mode 100644 index 4d876f8fb50e93fdfd53a9bcbb2f8c8f13a687e4..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_x0_75_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1_x0_75 
-python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV1/MobileNetV1_x0_75_train_infer_python.txt b/test_tipc/config/MobileNetV1/MobileNetV1_x0_75_train_infer_python.txt deleted file mode 100644 index f775c3f850ff35043920f478ecb66b5f5c77d31e..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV1/MobileNetV1_x0_75_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV1_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
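Each of these configs names a `pretrained_model_url` whose `.pdparams` weights are fetched before the `norm_export` step turns them into an inference model (`infer_export:True`). A sketch, under stated assumptions, of how a driver could chain the two steps; `download_and_export` is illustrative, not one of the shipped `test_tipc` scripts, and the prefix handling assumes Paddle's convention of passing weight paths without the `.pdparams` suffix:

```python
# Illustrative driver (not a shipped test_tipc script): download the
# pretrained .pdparams from pretrained_model_url, then run norm_export.
import subprocess
import urllib.request

def download_and_export(url: str, config: str, out_dir: str = "./inference"):
    fname = url.rsplit("/", 1)[-1]        # e.g. MobileNetV1_x0_75_pretrained.pdparams
    urllib.request.urlretrieve(url, fname)
    prefix = fname[:-len(".pdparams")]    # assumed: Paddle takes the prefix, no suffix
    subprocess.run(
        ["python3.7", "tools/export_model.py", "-c", config,
         "-o", f"Global.pretrained_model={prefix}",
         "-o", f"Global.save_inference_dir={out_dir}"],
        check=True,
    )

download_and_export(
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams",
    "ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml",
)
```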
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_train_amp_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_train_amp_infer_python.txt deleted file mode 100644 index 840a0f598abf0bd047f7ed8cf7085974fb856e5f..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_train_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_train_infer_python.txt deleted file mode 100644 index eef9b575203102f55d335c43ba6bda353581df53..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2 
-python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:64|128 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x0_25_train_amp_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x0_25_train_amp_infer_python.txt deleted file mode 100644 index 1c2718f405a059c9eebdd7e0ca60edd17e065a8a..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x0_25_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
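The `amp_train` commands above enable mixed-precision training through the `AMP.scale_loss=128`, `AMP.use_dynamic_loss_scaling=True` and `AMP.level=O2` overrides. Roughly, they map onto Paddle's AMP API as in this minimal sketch (a generic training step with a stand-in network, not PaddleClas's actual trainer loop):

```python
# Minimal sketch of what the AMP.* overrides correspond to in Paddle's API.
import paddle

model = paddle.vision.models.mobilenet_v1()          # stand-in network
opt = paddle.optimizer.Momentum(parameters=model.parameters())
model, opt = paddle.amp.decorate(models=model, optimizers=opt, level="O2")
scaler = paddle.amp.GradScaler(
    init_loss_scaling=128,            # AMP.scale_loss
    use_dynamic_loss_scaling=True)    # AMP.use_dynamic_loss_scaling

x = paddle.rand([8, 3, 224, 224])
label = paddle.randint(0, 1000, [8])
with paddle.amp.auto_cast(level="O2"):               # AMP.level
    loss = paddle.nn.CrossEntropyLoss()(model(x), label)
scaler.scale(loss).backward()   # scale the loss to avoid fp16 underflow
scaler.step(opt)                # unscale gradients, then optimizer step
scaler.update()                 # adapt the loss scale dynamically
opt.clear_grad()
```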
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x0_25_train_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x0_25_train_infer_python.txt deleted file mode 100644 index 98830b6ed6569e541c1d8c35d69da9c1370cefd4..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x0_25_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git 
a/test_tipc/config/MobileNetV2/MobileNetV2_x0_5_train_amp_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 03f8073e5e1047be5024dc04c7a34ba8943246c0..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x0_5_train_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x0_5_train_infer_python.txt deleted file mode 100644 index 8593560ffba05496fccee465854aac5c9ab158d3..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x0_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== 
-eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x0_75_train_amp_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x0_75_train_amp_infer_python.txt deleted file mode 100644 index 93547f8dc4e19ffa13bb4662619e9fb908a67dbb..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x0_75_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git 
a/test_tipc/config/MobileNetV2/MobileNetV2_x0_75_train_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x0_75_train_infer_python.txt deleted file mode 100644 index 657e76a3f82605bf0f443078b8bb15a8e9ec182b..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x0_75_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x1_5_train_amp_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x1_5_train_amp_infer_python.txt deleted file mode 100644 index 948db736c5d1e287a1d8c9378f1106365a30a3ad..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x1_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x1_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 
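The long `-o key=value` chains in these train commands (e.g. `-o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False`) override individual fields of the YAML config by dotted path. A minimal sketch of that override logic on a plain nested dict; PaddleClas's real implementation, with type coercion and validation, lives in `ppcls/utils/config.py`:

```python
# Minimal sketch of applying a "-o a.b.c=value" override to a nested config.
def override(cfg: dict, dotted_key: str, value):
    *parents, leaf = dotted_key.split(".")
    node = cfg
    for k in parents:
        node = node.setdefault(k, {})   # walk/create intermediate dicts
    node[leaf] = value                  # the real code also coerces types

cfg = {"Global": {}, "DataLoader": {"Train": {"sampler": {"shuffle": True}}}}
override(cfg, "Global.seed", 1234)
override(cfg, "DataLoader.Train.sampler.shuffle", False)
print(cfg["DataLoader"]["Train"]["sampler"])   # {'shuffle': False}
```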
-pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x1_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x1_5_train_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x1_5_train_infer_python.txt deleted file mode 100644 index f49d684ad70cb88b36df6e2c7c095a26e97b329c..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x1_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x1_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x1_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of 
file diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x2_0_train_amp_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x2_0_train_amp_infer_python.txt deleted file mode 100644 index 6b1b20096b72760432ee59dc6ebc21028eee1b86..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x2_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x2_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV2/MobileNetV2_x2_0_train_infer_python.txt b/test_tipc/config/MobileNetV2/MobileNetV2_x2_0_train_infer_python.txt deleted file mode 100644 index 7fc9c6e9108f32f310e6ebff12c6191205164476..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV2/MobileNetV2_x2_0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV2_x2_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
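The `infer_benchmark_params` blocks in these files describe the dummy input used when `Global.benchmark` is on: `random_infer_input:[{float32,[3,224,224]}]` reads as one random float32 tensor of shape 3x224x224 per image, with the batch dimension supplied by the `Global.batch_size:1|16` sweep. A simplified, hypothetical reading of that spec in NumPy:

```python
# Hypothetical reading of random_infer_input:[{float32,[3,224,224]}]:
# one random float32 tensor of shape [batch, 3, 224, 224] for the predictor.
import numpy as np

def make_random_input(dtype="float32", shape=(3, 224, 224), batch_size=1):
    return np.random.rand(batch_size, *shape).astype(dtype)

x = make_random_input(batch_size=16)   # matches the Global.batch_size:1|16 sweep
print(x.shape, x.dtype)                # (16, 3, 224, 224) float32
```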
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_35_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_35_train_amp_infer_python.txt deleted file mode 100644 index 72c0b5f4de86fd39d1c634f8d7f1afee36ec74ff..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_35_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x0_35 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_35_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o 
Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_35_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_35_train_infer_python.txt deleted file mode 100644 index 138b0b82245d24cd2ed19c2afd38751458c0bf11..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_35_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x0_35 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_35_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_5_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 28cd39dd69bfbb065cbfae257892ad9540d5a5e3..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -o Global.seed=1234 -o 
DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_5_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_5_train_infer_python.txt deleted file mode 100644 index a01e1b1ca252fa2309522563d0f65f9002c49a0f..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_5_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o 
Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_75_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_75_train_amp_infer_python.txt deleted file mode 100644 index f3749084578921829b31a896adc2906d0afc2ca4..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_75_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_75_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_75_train_infer_python.txt deleted file mode 100644 index 90dccf305200619b4b573b4e88df7720bc58b39a..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x0_75_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## 
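`gpu_list:0|0,1` makes each training test run twice, once on a single GPU and once on two; multi-GPU runs are normally started through `paddle.distributed.launch`. A sketch of building the two command lines (an assumed wrapper, not the actual `test_tipc` shell logic, which also exports `CUDA_VISIBLE_DEVICES`):

```python
# Assumed wrapper (not the actual test_tipc shell logic): turn one gpu_list
# variant into the command used to start tools/train.py.
def build_train_cmd(gpus: str, config: str, extra=()):
    base = ["tools/train.py", "-c", config, *extra]
    if "," in gpus:  # multi-GPU -> go through the distributed launcher
        return ["python3.7", "-m", "paddle.distributed.launch",
                f"--gpus={gpus}", *base]
    return ["python3.7", *base]  # single GPU, plain invocation

for gpus in "0|0,1".split("|"):
    cmd = build_train_cmd(
        gpus, "ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml",
        ("-o", "Global.seed=1234"))
    print(" ".join(cmd))
```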
-trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_FPGM_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_FPGM_train_amp_infer_python.txt deleted file mode 100644 index 5c6dd683c4dfde02de4747e9429bdb6a9f37a1f2..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_FPGM_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x1_0_FPGM -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -distill_export:null -kl_quant:deploy/slim/quant_post_static.py 
-c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_train_amp_infer_python.txt deleted file mode 100644 index 20f2d7b947f54744be0789742b5196495f7816e0..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_PACT_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x1_0_PACT -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -distill_export:null -kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_amp_infer_python.txt 
b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_amp_infer_python.txt deleted file mode 100644 index 61e864c79e0b905e55d0c83e90980f90e1ae28db..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -distill_export:null -kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt deleted file mode 100644 index 980f6226df6998d2cbc0a0858f628965525f41f9..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c 
ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -fpgm_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -distill_export:null -kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:256|640 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt deleted file mode 100644 index f251e9c9f850e58932f7dbb90b76a52c0eb7782f..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:amp --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest 
-train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -fpgm_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -distill_export:null -kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference -export2:null -inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/MobileNetV3_large_x1_0_inference.tar -infer_model:../inference/ -infer_export:null -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_25_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_25_train_amp_infer_python.txt deleted file mode 100644 index 3b3ee3adad01e951615f10d57ce1e7e43cd64315..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_25_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x1_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_25_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_25_train_infer_python.txt deleted file mode 100644 index 1835f2d92cbf01d4a630a0367d528687477631ba..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_25_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_large_x1_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline 
at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_35_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_35_train_amp_infer_python.txt deleted file mode 100644 index b42def4c8408d4570f03ddfe3337f704227d5fe4..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_35_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x0_35 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_35_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_35_train_infer_python.txt deleted file mode 100644 index 2f184e9272ace1a72186f14ab516225d68ef35a0..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_35_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x0_35 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null 
-distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_5_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 1382431826b34703e0e59fd1524390a1a9b4aeb3..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o 
Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_5_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_5_train_infer_python.txt deleted file mode 100644 index 4289fa00d29a5b46f5044fcbbdb2dd61c350379a..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_5_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_75_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_75_train_amp_infer_python.txt deleted file mode 100644 index 489b9408fdf2df35159e4e349b21f8c307a5a7b7..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_75_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c 
ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_75_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_75_train_infer_python.txt deleted file mode 100644 index c49efec3e8187344e4e41bbe4d0398fc2b4c8ac2..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x0_75_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o 
Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_0_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_0_train_amp_infer_python.txt deleted file mode 100644 index baa1546daa565741884f93121739b361dff4ad88..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_0_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_0_train_infer_python.txt deleted file mode 100644 index 6741d22b3536512471aa54b0ede5a2259f2caaa7..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_0_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null 
-train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_25_train_amp_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_25_train_amp_infer_python.txt deleted file mode 100644 index e34e17696b728c9071cf3bd6d65e4cd404b7c348..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_25_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x1_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_25_train_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_25_train_infer_python.txt deleted file mode 100644 index fb6a02d2a7bd80229a1e70b8702eeb7bf3303240..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileNetV3/MobileNetV3_small_x1_25_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:MobileNetV3_small_x1_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileViT/MobileViT_S_train_infer_python.txt b/test_tipc/config/MobileViT/MobileViT_S_train_infer_python.txt deleted file mode 100644 index 619416b3a86dc029f66dc0a3b47b322ca827fb90..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileViT/MobileViT_S_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:MobileViT_S -python:python3.7 -gpu_list:0|0,1 --o 
Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_S.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_S.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:128 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,256,256]}] diff --git a/test_tipc/config/MobileViT/MobileViT_XS_train_infer_python.txt b/test_tipc/config/MobileViT/MobileViT_XS_train_infer_python.txt deleted file mode 100644 index b7436adaaac2a94df9692845b92bc5c46887ba62..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileViT/MobileViT_XS_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileViT_XS -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XS.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null 
-null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XS.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XS.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,256,256]}] diff --git a/test_tipc/config/MobileViT/MobileViT_XXS_train_infer_python.txt b/test_tipc/config/MobileViT/MobileViT_XXS_train_infer_python.txt deleted file mode 100644 index 55cb442148ece745be4e2de6d95e2bcaf5fb0074..0000000000000000000000000000000000000000 --- a/test_tipc/config/MobileViT/MobileViT_XXS_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:MobileViT_XXS -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XXS.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XXS.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XXS.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] 
--o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,256,256]}] diff --git a/test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt deleted file mode 100644 index b86aa9b1cffb58e2579b46e726e70a3d6f3b1790..0000000000000000000000000000000000000000 --- a/test_tipc/config/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt +++ /dev/null @@ -1,19 +0,0 @@ -===========================cpp_infer_params=========================== -model_name:PPShiTu -cpp_infer_type:shitu -feature_inference_model_dir:./feature_inference/ -det_inference_model_dir:./det_inference -cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar -det_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar -infer_quant:False -inference_cmd:./deploy/cpp_shitu/build/pp_shitu -c inference_drink.yaml -use_gpu:True|False -enable_mkldnn:True|False -cpu_threads:1|6 -batch_size:1 -use_tensorrt:False|True -precision:fp32|fp16 -data_dir:./dataset/drink_dataset_v1.0 -benchmark:True -generate_yaml_cmd:python3 test_tipc/generate_cpp_yaml.py -transform_index_cmd:python3 deploy/cpp_shitu/tools/transform_id_map.py -c inference_drink.yaml diff --git a/test_tipc/config/PPLCNet/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt b/test_tipc/config/PPLCNet/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt deleted file mode 100644 index b45c2a01b2cb0d9491b516e2caf410ef04e7d35e..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt +++ /dev/null @@ -1,8 +0,0 @@ -runtime_device:arm_cpu -lite_arm_work_path:/data/local/tmp/arm_cpu/ -lite_arm_so_path:inference_lite_lib.android.armv8/cxx/lib/libpaddle_light_api_shared.so -clas_model_file:MobileNetV3_large_x1_0 -inference_cmd:clas_system config.txt tabby_cat.jpg ---num_threads_list:1 ---batch_size_list:1 ---precision_list:FP32 diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_25_train_amp_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_25_train_amp_infer_python.txt deleted file mode 100644 index 1977e30f5f91e1a39b805e205279d59e8e81e570..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_25_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o 
DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_25_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_25_train_infer_python.txt deleted file mode 100644 index b1596ee33ccade9fe3e8347ded973c3ccad35b67..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_25_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null 
-===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_35_train_amp_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_35_train_amp_infer_python.txt deleted file mode 100644 index ad2ceb210308db6679d0904bb3ed1c7c494839af..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_35_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_35 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_35_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_35_train_infer_python.txt deleted file mode 100644 index 57d8aa324c5175380053fa3ddec12f0e867dfb99..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_35_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_35 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null 
-null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_5_train_amp_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 6f5318b8a965543cc88b4864b9e486250b1d6999..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git 
a/test_tipc/config/PPLCNet/PPLCNet_x0_5_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_5_train_infer_python.txt deleted file mode 100644 index 025fa994ebab8b9e9472ef62ef8fe7edc4c57c87..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_5_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_75_train_amp_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_75_train_amp_infer_python.txt deleted file mode 100644 index cfda57015d918b79bc3ab9103314b53803544c39..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_75_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_75_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x0_75_train_infer_python.txt deleted file mode 100644 index 3f16d3640e06a6f1fd87c4946e6d8d2efb4d9822..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x0_75_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x0_75 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_amp_infer_python.txt 
b/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_amp_infer_python.txt deleted file mode 100644 index c335e54284af5eaeeb321960ba2b27b553a04ca4..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_infer_python.txt deleted file mode 100644 index f17667e285652efbeea24f974b55982541762bda..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -null:null -## -===========================infer_params========================== --o 
Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_5_train_amp_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_5_train_amp_infer_python.txt deleted file mode 100644 index 6e170df68bd3d238e773744851795d62b09283f2..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x1_5_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x1_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_5_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_5_train_infer_python.txt deleted file mode 100644 index 6a40f04f2d96697998ecc30e7a9b1fbffdfd1941..0000000000000000000000000000000000000000 --- 
a/test_tipc/config/PPLCNet/PPLCNet_x1_5_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x1_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_0_train_amp_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x2_0_train_amp_infer_python.txt deleted file mode 100644 index b3a6f7b57ff8e40e68ee42a4ae6487a56096c0f9..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x2_0_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x2_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference 
--o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_0_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x2_0_train_infer_python.txt deleted file mode 100644 index ff4939cec8b735ea7babaa6e23c5a4a25497158a..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x2_0_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x2_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_amp_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_amp_infer_python.txt deleted file mode 100644 index 26e95cb7897d83a45355300bf1fdd4d1cf4aa3b5..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_amp_infer_python.txt +++ /dev/null @@ -1,51 +0,0 @@ 
-===========================train_params=========================== -model_name:PPLCNet_x2_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_infer_python.txt deleted file mode 100644 index 723939b03fa0d6adb69534d24a69a1bc2c07bf36..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_infer_python.txt +++ /dev/null @@ -1,53 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x2_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt deleted file mode 100644 index 74c6d046f9d9ff118b83826037b46ba2fb9fadf3..0000000000000000000000000000000000000000 --- a/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:PPLCNet_x2_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:amp --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null diff --git a/test_tipc/config/PVTV2/PVT_V2_B0_train_infer_python.txt b/test_tipc/config/PVTV2/PVT_V2_B0_train_infer_python.txt deleted file mode 100644 index a4229d854a9706cfa9bd0baeb18c6c59803197ce..0000000000000000000000000000000000000000 --- a/test_tipc/config/PVTV2/PVT_V2_B0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:PVT_V2_B0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o 
Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B0.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PVTV2/PVT_V2_B1_train_infer_python.txt b/test_tipc/config/PVTV2/PVT_V2_B1_train_infer_python.txt deleted file mode 100644 index 5394790a03b68366a83b4ea2fba3e67c988d7f1e..0000000000000000000000000000000000000000 --- a/test_tipc/config/PVTV2/PVT_V2_B1_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:PVT_V2_B1 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B1.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PVTV2/PVT_V2_B2_Linear_train_infer_python.txt b/test_tipc/config/PVTV2/PVT_V2_B2_Linear_train_infer_python.txt deleted file mode 100644 index f50107fea5309eee26b3e46f1d9f7388ff7f1b59..0000000000000000000000000000000000000000 --- a/test_tipc/config/PVTV2/PVT_V2_B2_Linear_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:PVT_V2_B2_Linear -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B2_Linear.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:128 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 
-===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/PVTV2/PVT_V2_B2_train_infer_python.txt b/test_tipc/config/PVTV2/PVT_V2_B2_train_infer_python.txt deleted file mode 100644 index d2de07223364b1eb78597c7de97e8a10725bbc18..0000000000000000000000000000000000000000 --- a/test_tipc/config/PVTV2/PVT_V2_B2_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:PVT_V2_B2 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B2.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/PVTV2/PVT_V2_B3_train_infer_python.txt b/test_tipc/config/PVTV2/PVT_V2_B3_train_infer_python.txt deleted file mode 100644 index 8f71e148fdf61147d6b4c5acdd818e8051222409..0000000000000000000000000000000000000000 --- a/test_tipc/config/PVTV2/PVT_V2_B3_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:PVT_V2_B3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null 
-distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B3.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/PVTV2/PVT_V2_B4_train_infer_python.txt b/test_tipc/config/PVTV2/PVT_V2_B4_train_infer_python.txt deleted file mode 100644 index 304deb6872641c7cb5ae51ccc1aa241fc621e660..0000000000000000000000000000000000000000 --- a/test_tipc/config/PVTV2/PVT_V2_B4_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:PVT_V2_B4 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B4.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null 
-null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/PVTV2/PVT_V2_B5_train_infer_python.txt b/test_tipc/config/PVTV2/PVT_V2_B5_train_infer_python.txt deleted file mode 100644 index 9527cb2d385ed217e3212fd535629b2407ff8dee..0000000000000000000000000000000000000000 --- a/test_tipc/config/PVTV2/PVT_V2_B5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:PVT_V2_B5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B5.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/ReXNet/ReXNet_1_0_train_amp_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_1_0_train_amp_infer_python.txt deleted file mode 100644 index 9ae14e503dbd6a13a0eddf3a29bbbc13d3e73fd8..0000000000000000000000000000000000000000 --- a/test_tipc/config/ReXNet/ReXNet_1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ReXNet_1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o 
AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ReXNet/ReXNet_1_0_train_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_1_0_train_infer_python.txt deleted file mode 100644 index 4f505f6e283642df524cc1845e1e0ba553cb4b00..0000000000000000000000000000000000000000 --- a/test_tipc/config/ReXNet/ReXNet_1_0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ReXNet_1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git 
a/test_tipc/config/ReXNet/ReXNet_1_3_train_amp_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_1_3_train_amp_infer_python.txt deleted file mode 100644 index f5c7aed52a243a03ff63269c5312b021786a7d13..0000000000000000000000000000000000000000 --- a/test_tipc/config/ReXNet/ReXNet_1_3_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ReXNet_1_3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ReXNet/ReXNet_1_3_train_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_1_3_train_infer_python.txt deleted file mode 100644 index e21cfa98f7a8a3bdea58a5a7606d601f830d5a92..0000000000000000000000000000000000000000 --- a/test_tipc/config/ReXNet/ReXNet_1_3_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ReXNet_1_3 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -null:null -## 
-===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_3_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ReXNet/ReXNet_1_5_train_amp_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_1_5_train_amp_infer_python.txt deleted file mode 100644 index 3399aad8da871d85b4b3c37acfcb657dcfa58c04..0000000000000000000000000000000000000000 --- a/test_tipc/config/ReXNet/ReXNet_1_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ReXNet_1_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ReXNet/ReXNet_1_5_train_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_1_5_train_infer_python.txt deleted file mode 100644 index 4e750926ef4d5e933b4b05aba63f15dda9c9d7cb..0000000000000000000000000000000000000000 
--- a/test_tipc/config/ReXNet/ReXNet_1_5_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ReXNet_1_5
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_5_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ReXNet/ReXNet_2_0_train_amp_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_2_0_train_amp_infer_python.txt
deleted file mode 100644
index f036e87b9c7671150f537ae3d458218e83748115..0000000000000000000000000000000000000000
--- a/test_tipc/config/ReXNet/ReXNet_2_0_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ReXNet_2_0
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ReXNet/ReXNet_2_0_train_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_2_0_train_infer_python.txt
deleted file mode 100644
index b98182f27155e2a8d14a3d88781e86294f767aca..0000000000000000000000000000000000000000
--- a/test_tipc/config/ReXNet/ReXNet_2_0_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ReXNet_2_0
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ReXNet/ReXNet_3_0_train_amp_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_3_0_train_amp_infer_python.txt
deleted file mode 100644
index 3263e6e16ad1a259e2804c40f6ac6a4319eca726..0000000000000000000000000000000000000000
--- a/test_tipc/config/ReXNet/ReXNet_3_0_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ReXNet_3_0
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ReXNet/ReXNet_3_0_train_infer_python.txt b/test_tipc/config/ReXNet/ReXNet_3_0_train_infer_python.txt
deleted file mode 100644
index d8b9ac8ed95322cfd6a3673815f63c31b73e0716..0000000000000000000000000000000000000000
--- a/test_tipc/config/ReXNet/ReXNet_3_0_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ReXNet_3_0
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/RedNet/RedNet101_train_amp_infer_python.txt b/test_tipc/config/RedNet/RedNet101_train_amp_infer_python.txt
deleted file mode 100644
index 8255130c48d2b5083c2e629d8a4381203b98806f..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet101_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:RedNet101
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:2
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/RedNet/RedNet101_train_infer_python.txt b/test_tipc/config/RedNet/RedNet101_train_infer_python.txt
deleted file mode 100644
index 948f1fea96edb779f3cdd12867c5813e1a5e8b74..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet101_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:RedNet101
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:2
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/RedNet/RedNet152_train_amp_infer_python.txt b/test_tipc/config/RedNet/RedNet152_train_amp_infer_python.txt
deleted file mode 100644
index 5b208005b8357165d130d4e74e1cc943efe36b7c..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet152_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:RedNet152
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:2
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/RedNet/RedNet152_train_infer_python.txt b/test_tipc/config/RedNet/RedNet152_train_infer_python.txt
deleted file mode 100644
index fca7c0e0e10d4d03bc85eb28f66a9b58dfd1a4e1..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet152_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:RedNet152
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:2
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/RedNet/RedNet26_train_amp_infer_python.txt b/test_tipc/config/RedNet/RedNet26_train_amp_infer_python.txt
deleted file mode 100644
index 1aa6bbd4d0ca15b7b07852b73b8c4c58175542b4..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet26_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:RedNet26
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet26_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/RedNet/RedNet26_train_infer_python.txt b/test_tipc/config/RedNet/RedNet26_train_infer_python.txt
deleted file mode 100644
index 7b13978e71ad058dbcbceed695b1f8da0385abe8..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet26_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:RedNet26
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet26_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/RedNet/RedNet38_train_amp_infer_python.txt b/test_tipc/config/RedNet/RedNet38_train_amp_infer_python.txt
deleted file mode 100644
index 05715a860b2f21b30d5c869c1115d98dcf41cd84..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet38_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:RedNet38
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet38_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/RedNet/RedNet38_train_infer_python.txt b/test_tipc/config/RedNet/RedNet38_train_infer_python.txt
deleted file mode 100644
index 9ba325fe0628d45d43759a1a3a01e7a9a8f10dc6..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet38_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:RedNet38
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet38_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/RedNet/RedNet50_train_amp_infer_python.txt b/test_tipc/config/RedNet/RedNet50_train_amp_infer_python.txt
deleted file mode 100644
index 3ea2e0d316e239af4eab2f93d21b53e28d88fe12..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet50_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:RedNet50
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet50_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/RedNet/RedNet50_train_infer_python.txt b/test_tipc/config/RedNet/RedNet50_train_infer_python.txt
deleted file mode 100644
index 14265c9ff3e39b1792a9e035f6d74f3e92e5c8a7..0000000000000000000000000000000000000000
--- a/test_tipc/config/RedNet/RedNet50_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:RedNet50
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet50_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/Res2Net/Res2Net101_vd_26w_4s_train_amp_infer_python.txt b/test_tipc/config/Res2Net/Res2Net101_vd_26w_4s_train_amp_infer_python.txt
deleted file mode 100644
index 6a9f287429be801d96be984048bd0be5fedb4c68..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net101_vd_26w_4s_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net101_vd_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net101_vd_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/Res2Net/Res2Net101_vd_26w_4s_train_infer_python.txt b/test_tipc/config/Res2Net/Res2Net101_vd_26w_4s_train_infer_python.txt
deleted file mode 100644
index c8bab72c10aedb0515ad5381cb4848a7785ece19..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net101_vd_26w_4s_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net101_vd_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net101_vd_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/Res2Net/Res2Net200_vd_26w_4s_train_amp_infer_python.txt b/test_tipc/config/Res2Net/Res2Net200_vd_26w_4s_train_amp_infer_python.txt
deleted file mode 100644
index c8157cae60a41cfa15458d4b697000cd2f9f20d0..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net200_vd_26w_4s_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net200_vd_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/Res2Net/Res2Net200_vd_26w_4s_train_infer_python.txt b/test_tipc/config/Res2Net/Res2Net200_vd_26w_4s_train_infer_python.txt
deleted file mode 100644
index 6fab68f036d76ef948621b253418cee0ca364bc6..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net200_vd_26w_4s_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net200_vd_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/Res2Net/Res2Net50_14w_8s_train_amp_infer_python.txt b/test_tipc/config/Res2Net/Res2Net50_14w_8s_train_amp_infer_python.txt
deleted file mode 100644
index 6bfee99062699c581a38bb2fe14ed6ad986750c2..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net50_14w_8s_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net50_14w_8s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_14w_8s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/Res2Net/Res2Net50_14w_8s_train_infer_python.txt b/test_tipc/config/Res2Net/Res2Net50_14w_8s_train_infer_python.txt
deleted file mode 100644
index f74ddf866772c2eb3c64a07136a4643f95ebdad0..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net50_14w_8s_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net50_14w_8s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_14w_8s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/Res2Net/Res2Net50_26w_4s_train_amp_infer_python.txt b/test_tipc/config/Res2Net/Res2Net50_26w_4s_train_amp_infer_python.txt
deleted file mode 100644
index 4a33c79c4c7f4b216e9974376fedc6024f269c8e..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net50_26w_4s_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net50_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/Res2Net/Res2Net50_26w_4s_train_infer_python.txt b/test_tipc/config/Res2Net/Res2Net50_26w_4s_train_infer_python.txt
deleted file mode 100644
index 1c4c9242bb7affae0045936d65ded2b8228d2fb0..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net50_26w_4s_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net50_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/Res2Net/Res2Net50_vd_26w_4s_train_amp_infer_python.txt b/test_tipc/config/Res2Net/Res2Net50_vd_26w_4s_train_amp_infer_python.txt
deleted file mode 100644
index cd3409a333240e4026960a089c7720b97f772eba..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net50_vd_26w_4s_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net50_vd_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_vd_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/Res2Net/Res2Net50_vd_26w_4s_train_infer_python.txt b/test_tipc/config/Res2Net/Res2Net50_vd_26w_4s_train_infer_python.txt
deleted file mode 100644
index 6e8b05f0fadbd9a3a6016631e2e57b75d539df70..0000000000000000000000000000000000000000
--- a/test_tipc/config/Res2Net/Res2Net50_vd_26w_4s_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:Res2Net50_vd_26w_4s
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_vd_26w_4s_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNeSt/ResNeSt50_fast_1s1x64d_train_amp_infer_python.txt b/test_tipc/config/ResNeSt/ResNeSt50_fast_1s1x64d_train_amp_infer_python.txt
deleted file mode 100644
index 0a8e0d0a97e8fbb24579e0d87f720169f1d5e722..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNeSt/ResNeSt50_fast_1s1x64d_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNeSt50_fast_1s1x64d
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_fast_1s1x64d_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNeSt/ResNeSt50_fast_1s1x64d_train_infer_python.txt b/test_tipc/config/ResNeSt/ResNeSt50_fast_1s1x64d_train_infer_python.txt
deleted file mode 100644
index 00166a60ca3a70b16d38a6333f49261cd3a58347..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNeSt/ResNeSt50_fast_1s1x64d_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params=========================== -model_name:ResNeSt50_fast_1s1x64d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_fast_1s1x64d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeSt/ResNeSt50_train_amp_infer_python.txt b/test_tipc/config/ResNeSt/ResNeSt50_train_amp_infer_python.txt deleted file mode 100644 index 1d0e59216c17dacaf255f3ce7a4d3020d4d48c48..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeSt/ResNeSt50_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeSt50 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c 
ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeSt/ResNeSt50_train_infer_python.txt b/test_tipc/config/ResNeSt/ResNeSt50_train_infer_python.txt deleted file mode 100644 index 70681273249675b16b04b370333b396d7f5598aa..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeSt/ResNeSt50_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeSt50 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt101_32x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_32x4d_train_amp_infer_python.txt deleted file mode 100644 index 1ca28d104c9b4f9d1e43869c4e3fb4b1cf6c0019..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_32x4d -python:python3.7 -gpu_list:0|0,1 --o 
Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt101_32x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_32x4d_train_infer_python.txt deleted file mode 100644 index 4f9df21ec66d409b84a6ac3a7a6952afec168c00..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams -infer_model:../inference/ 
-infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt101_64x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_64x4d_train_amp_infer_python.txt deleted file mode 100644 index 69db3f17bf2b6e142cbeaecf4f5e131571966676..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_64x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt101_64x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_64x4d_train_infer_python.txt deleted file mode 100644 index e15b752694e7b438cd8964a647e9771d73c9b61d..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_64x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o 
Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt101_vd_32x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_vd_32x4d_train_amp_infer_python.txt deleted file mode 100644 index 22edd3b99ed6355199d1cfcf13f481be3d76cc04..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_vd_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_vd_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt101_vd_32x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_vd_32x4d_train_infer_python.txt deleted file mode 100644 index 87d8396c7d993e353f64fa1a56742de308a906a9..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_vd_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_vd_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt101_vd_64x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_vd_64x4d_train_amp_infer_python.txt deleted file mode 100644 index 73f786f113a566bde9a1fb75e49b223c6701756d..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_vd_64x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_vd_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o 
Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt101_vd_64x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt101_vd_64x4d_train_infer_python.txt deleted file mode 100644 index 4ec192cc76656532a481436aa48e18b49d3be296..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt101_vd_64x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_vd_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_64x4d_pretrained.pdparams 
-infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt152_32x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_32x4d_train_amp_infer_python.txt deleted file mode 100644 index 1ca28d104c9b4f9d1e43869c4e3fb4b1cf6c0019..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt152_32x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_32x4d_train_infer_python.txt deleted file mode 100644 index 4f9df21ec66d409b84a6ac3a7a6952afec168c00..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt101_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o 
Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt152_64x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_64x4d_train_amp_infer_python.txt deleted file mode 100644 index 3841f2b7e72e3d6c82a879e833f65758a2f4a934..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_64x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt152_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt152_64x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_64x4d_train_infer_python.txt deleted file mode 100644 index 0d1bdf8f1a497adafd12ab1176eb1e32b823309f..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_64x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt152_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt152_vd_32x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_vd_32x4d_train_amp_infer_python.txt deleted file mode 100644 index b0baf65150176639f60200ebd73b3b708aff41ea..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_vd_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt152_vd_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt152_vd_32x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_vd_32x4d_train_infer_python.txt deleted file mode 100644 index 2cb58880278f9edd85255d1dcfd4260d475fdfd7..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_vd_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt152_vd_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_32x4d_pretrained.pdparams -infer_model:../inference/ 
-infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt152_vd_64x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_vd_64x4d_train_amp_infer_python.txt deleted file mode 100644 index 573b579ff161aca2741867bece064ff10fd5bc22..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_vd_64x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt152_vd_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt152_vd_64x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt152_vd_64x4d_train_infer_python.txt deleted file mode 100644 index 20765c423704335a60ea56aab615979349e1a036..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt152_vd_64x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt152_vd_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt50_32x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_32x4d_train_amp_infer_python.txt deleted file mode 100644 index b28bd26fc7f2af8996722593f41129d3898b3e93..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt50_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt50_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt50_32x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_32x4d_train_infer_python.txt deleted file mode 100644 index d1fcf270c9da80660800eec0cec9d24599fac7f4..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt50_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt50_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNeXt/ResNeXt50_64x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_64x4d_train_amp_infer_python.txt deleted file mode 100644 index 9d04710ab9b93546f4ff235d48c95eaf0cf25e40..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt50_64x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt50_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ResNeXt/ResNeXt50_64x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_64x4d_train_infer_python.txt deleted file mode 100644 index 81eab9aa344493d20e19a79e3458b2988475717c..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNeXt/ResNeXt50_64x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ResNeXt50_64x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_64x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle 
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNeXt/ResNeXt50_vd_32x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_vd_32x4d_train_amp_infer_python.txt
deleted file mode 100644
index b502731f745810f6108c697a8b082e4805a55a89..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNeXt/ResNeXt50_vd_32x4d_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNeXt50_vd_32x4d
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_32x4d_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNeXt/ResNeXt50_vd_32x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_vd_32x4d_train_infer_python.txt
deleted file mode 100644
index 1d18804f41c09d6f09959d5efb979cee9b81242e..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNeXt/ResNeXt50_vd_32x4d_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNeXt50_vd_32x4d
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_32x4d_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNeXt/ResNeXt50_vd_64x4d_train_amp_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_vd_64x4d_train_amp_infer_python.txt
deleted file mode 100644
index b0b0b3fe272ca5254fe59faa504cb3905b8c22fc..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNeXt/ResNeXt50_vd_64x4d_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNeXt50_vd_64x4d
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_64x4d_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNeXt/ResNeXt50_vd_64x4d_train_infer_python.txt b/test_tipc/config/ResNeXt/ResNeXt50_vd_64x4d_train_infer_python.txt
deleted file mode 100644
index b63ed3afd399ed233a470436a8849205fdb982e3..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNeXt/ResNeXt50_vd_64x4d_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNeXt50_vd_64x4d
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_64x4d_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
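All of the deleted files above share the same line-oriented TIPC format: one `key:value` setting per line, `##` and `===...===` separator lines, and `|` delimiting the alternative values a test sweep iterates over (e.g. `gpu_list:0|0,1` covers single-GPU and two-GPU runs). A minimal reader sketch for that format, assuming a hypothetical helper name `load_tipc_config` (nothing like it ships with the repo):

```python
# Hypothetical helper (not part of PaddleClas): parse a TIPC config file
# into an ordered list of (key, [alternatives]) pairs.
def load_tipc_config(path):
    entries = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            # "##" and "===...===" lines are section separators, not settings
            if not line or line.startswith("##") or line.startswith("="):
                continue
            # only the first ":" separates key from value; "|" lists sweep values
            key, _, value = line.partition(":")
            entries.append((key, value.split("|") if value else [""]))
    return entries
```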
diff --git a/test_tipc/config/ResNet/ResNet101_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet101_train_amp_infer_python.txt
deleted file mode 100644
index 9172ef8cf54d76ffe17079a17b6bf796df2a3351..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet101_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet101
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet101_train_infer_python.txt b/test_tipc/config/ResNet/ResNet101_train_infer_python.txt
deleted file mode 100644
index aee834cff7f5a2e86c798f1974dd85f32745143f..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet101_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet101
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet101_vd_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet101_vd_train_amp_infer_python.txt
deleted file mode 100644
index eb8238584b3828a12c7c8d75118dcc78da8a0562..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet101_vd_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet101_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet101_vd_train_infer_python.txt b/test_tipc/config/ResNet/ResNet101_vd_train_infer_python.txt
deleted file mode 100644
index 3c9745b35f06a8f3485c61706742cab79d53bb2a..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet101_vd_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet101_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet152_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet152_train_amp_infer_python.txt
deleted file mode 100644
index 8549cc07cde50b4f51da729aacebed08470c2187..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet152_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet152
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-to_static_train:-o Global.to_static=True
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet152_train_infer_python.txt b/test_tipc/config/ResNet/ResNet152_train_infer_python.txt
deleted file mode 100644
index e45266a9a83dfd93561c91e3543f668d802cc440..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet152_train_infer_python.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-===========================train_params===========================
-model_name:ResNet152
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-to_static_train:-o Global.to_static=True
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================train_benchmark_params==========================
-batch_size:32
-fp_items:fp32
-epoch:1
---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
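The `random_infer_input:[{float32,[3,224,224]}]` entry closing each `*_train_infer_python.txt` file tells the benchmark to synthesize a random CHW tensor instead of reading real images. A sketch of how such an input could be built, assuming the batch dimension sits on top of the listed shape (`make_random_infer_input` is a hypothetical name, not a repo function):

```python
import numpy as np

# "[{float32,[3,224,224]}]" describes one random CHW tensor; the
# batch dimension comes from the sweep above (Global.batch_size:1|16).
def make_random_infer_input(batch_size=1, shape=(3, 224, 224), dtype="float32"):
    return np.random.random((batch_size, *shape)).astype(dtype)

x = make_random_infer_input(16)  # ndarray of shape (16, 3, 224, 224)
```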
diff --git a/test_tipc/config/ResNet/ResNet152_vd_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet152_vd_train_amp_infer_python.txt
deleted file mode 100644
index a8e8c630516cd8ce1c5918a60e0feffdfd746270..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet152_vd_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet152_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet152_vd_train_infer_python.txt b/test_tipc/config/ResNet/ResNet152_vd_train_infer_python.txt
deleted file mode 100644
index 0ca10de1d62986dc8c2f1ebe1cb84cc59620fd87..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet152_vd_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet152_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet18_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet18_train_amp_infer_python.txt
deleted file mode 100644
index 4f2badaf2ce2f61ffe7676825411fdb0f955cad8..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet18_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet18
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet18_train_infer_python.txt b/test_tipc/config/ResNet/ResNet18_train_infer_python.txt
deleted file mode 100644
index c71707d31a9ea0c440c0c05536d907741fc77b5f..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet18_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet18
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet18_vd_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet18_vd_train_amp_infer_python.txt
deleted file mode 100644
index 4dc479177c33e078023dcd352cba51b74b5417b2..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet18_vd_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet18_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet18_vd_train_infer_python.txt b/test_tipc/config/ResNet/ResNet18_vd_train_infer_python.txt
deleted file mode 100644
index c9ba57d2532a55de69f8b507985af7f03671072a..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet18_vd_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet18_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet200_vd_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet200_vd_train_amp_infer_python.txt
deleted file mode 100644
index a829b20fa8c5588eaea68cf705bf06fd6fd5fef7..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet200_vd_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet200_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet200_vd_train_infer_python.txt b/test_tipc/config/ResNet/ResNet200_vd_train_infer_python.txt
deleted file mode 100644
index 810aef1f402ac9caab76dd931db7ee39de57e885..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet200_vd_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet200_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet34_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet34_train_amp_infer_python.txt
deleted file mode 100644
index 301ca3d91f6130a49b66ba551e9cabe8a426d0b1..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet34_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet34
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet34_train_infer_python.txt b/test_tipc/config/ResNet/ResNet34_train_infer_python.txt
deleted file mode 100644
index 7f1fef2962be1f33b7638793aec9197ae8817c2e..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet34_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet34
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet34_vd_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet34_vd_train_amp_infer_python.txt
deleted file mode 100644
index 62473756e7f8b5195a5782e639254643a44444ce..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet34_vd_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet34_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet34_vd_train_infer_python.txt b/test_tipc/config/ResNet/ResNet34_vd_train_infer_python.txt
deleted file mode 100644
index 4771fa0bb1baf06fad9db5aa2d663c4d503f4f3f..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet34_vd_train_infer_python.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-===========================train_params===========================
-model_name:ResNet34_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet50_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet50_train_amp_infer_python.txt
deleted file mode 100644
index a398086aaf466d91e330d0e794943324e9913870..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet50_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet50
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-to_static_train:-o Global.to_static=True
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet50_train_infer_python.txt b/test_tipc/config/ResNet/ResNet50_train_infer_python.txt
deleted file mode 100644
index 6a668ccd311c82f33f2843ac1ee086027635dd0d..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet50_train_infer_python.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-===========================train_params===========================
-model_name:ResNet50
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:norm_train
-norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
-pact_train:null
-fpgm_train:null
-distill_train:null
-to_static_train:-o Global.to_static=True
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml
-quant_export:null
-fpgm_export:null
-distill_export:null
-kl_quant:null
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-===========================train_benchmark_params==========================
-batch_size:128
-fp_items:fp32
-epoch:1
---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
-===========================infer_benchmark_params==========================
-random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet50_vd_FPGM_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet50_vd_FPGM_train_amp_infer_python.txt
deleted file mode 100644
index 0b00effeb535ecefd85507f869815dc5cde1253a..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet50_vd_FPGM_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet50_vd_FPGM
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
-fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
-distill_export:null
-kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
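The `inference:` line plus the `-o Key:v1|v2` lines that follow it encode a command sweep: the harness runs the fixed command once per combination of alternatives, passing each chosen value as a `-o Key=value` override. A rough Python re-creation of that expansion (the shell scripts under `test_tipc/` do the real work; `base` and the two-key `overrides` dict here are purely illustrative):

```python
import itertools

# Expand "-o Key:v1|v2" sweep lines into concrete "-o Key=value" overrides,
# one command per combination, mirroring what the test runner iterates over.
base = "python3.7 python/predict_cls.py -c configs/inference_cls.yaml"
overrides = {
    "Global.use_gpu": ["True", "False"],
    "Global.batch_size": ["1", "16"],
}
keys = list(overrides)
for combo in itertools.product(*(overrides[k] for k in keys)):
    flags = " ".join(f"-o {k}={v}" for k, v in zip(keys, combo))
    print(f"{base} {flags}")  # or hand the string to subprocess.run(..., shell=True)
```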
diff --git a/test_tipc/config/ResNet/ResNet50_vd_PACT_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet50_vd_PACT_train_amp_infer_python.txt
deleted file mode 100644
index e4c65b010a1dd36f4a5b34cf1d03980913c11c18..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet50_vd_PACT_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet50_vd_PACT
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
-fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
-distill_export:null
-kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
diff --git a/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
deleted file mode 100644
index 51c73f13d46c3a8793f9b5db92a74e0aa7b4e599..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-===========================cpp_infer_params===========================
-model_name:ResNet50_vd
-cpp_infer_type:cls
-cls_inference_model_dir:./cls_inference/
-det_inference_model_dir:
-cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/ResNet50_vd_inference.tar
-det_inference_url:
-infer_quant:False
-inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
-use_gpu:True|False
-enable_mkldnn:True|False
-cpu_threads:1|6
-batch_size:1
-use_tensorrt:False|True
-precision:fp32|fp16
-image_dir:./dataset/ILSVRC2012/val
-benchmark:True
-generate_yaml_cmd:python3 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
deleted file mode 100644
index 163bb48429b468433ee6bb539c029f51fe364190..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-===========================paddle2onnx_params===========================
-model_name:ResNet50_vd
-python:python3.7
-2onnx: paddle2onnx
---model_dir:./deploy/models/ResNet50_vd_infer/
---model_filename:inference.pdmodel
---params_filename:inference.pdiparams
---save_file:./deploy/models/ResNet50_vd_infer/inference.onnx
---opset_version:10
---enable_onnx_checker:True
-inference: python/predict_cls.py -c configs/inference_cls.yaml
-Global.use_onnx:True
-Global.inference_model_dir:models/ResNet50_vd_infer/
-Global.use_gpu:False
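The `paddle2onnx_params` block above maps one-to-one onto a paddle2onnx invocation. A sketch of the equivalent call, with flag names and values taken verbatim from the deleted config file:

```python
import subprocess

# Export the Paddle inference model to ONNX exactly as the config encodes it.
cmd = [
    "paddle2onnx",
    "--model_dir", "./deploy/models/ResNet50_vd_infer/",
    "--model_filename", "inference.pdmodel",
    "--params_filename", "inference.pdiparams",
    "--save_file", "./deploy/models/ResNet50_vd_infer/inference.onnx",
    "--opset_version", "10",
    "--enable_onnx_checker", "True",
]
subprocess.run(cmd, check=True)
```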
diff --git a/test_tipc/config/ResNet/ResNet50_vd_train_amp_infer_python.txt b/test_tipc/config/ResNet/ResNet50_vd_train_amp_infer_python.txt
deleted file mode 100644
index a9cc2d027752db2880ff1655b69c1adc7d82bb19..0000000000000000000000000000000000000000
--- a/test_tipc/config/ResNet/ResNet50_vd_train_amp_infer_python.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-===========================train_params===========================
-model_name:ResNet50_vd
-python:python3.7
-gpu_list:0|0,1
--o Global.device:gpu
--o Global.auto_cast:null
--o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
--o Global.output_dir:./output/
--o DataLoader.Train.sampler.batch_size:8
--o Global.pretrained_model:null
-train_model_name:latest
-train_infer_img_dir:./dataset/ILSVRC2012/val
-null:null
-##
-trainer:amp_train
-amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2
-pact_train:null
-fpgm_train:null
-distill_train:null
-null:null
-null:null
-##
-===========================eval_params===========================
-eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-null:null
-##
-===========================infer_params==========================
--o Global.save_inference_dir:./inference
--o Global.pretrained_model:
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
-fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
-distill_export:null
-kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference
-export2:null
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
-infer_model:../inference/
-infer_export:True
-infer_quant:Fasle
-inference:python/predict_cls.py -c configs/inference_cls.yaml
--o Global.use_gpu:True|False
--o Global.enable_mkldnn:True|False
--o Global.cpu_num_threads:1|6
--o Global.batch_size:1|16
--o Global.use_tensorrt:True|False
--o Global.use_fp16:True|False
--o Global.inference_model_dir:../inference
--o Global.infer_imgs:../dataset/ILSVRC2012/val
--o Global.save_log_path:null
--o Global.benchmark:True
-null:null
-null:null
-train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -fpgm_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml -fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml -distill_export:null -kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ResNet/ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/config/ResNet/ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt deleted file mode 100644 index 22c0f8db8395ad8bea6468cb92a5f28606fd3cac..0000000000000000000000000000000000000000 --- a/test_tipc/config/ResNet/ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ResNet50_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:amp --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml 
-o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -fpgm_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml -fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml -distill_export:null -kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference -export2:null -inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/ResNet50_vd_inference.tar -infer_model:../inference/ -infer_export:null -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SENet/SENet154_vd_train_amp_infer_python.txt b/test_tipc/config/SENet/SENet154_vd_train_amp_infer_python.txt deleted file mode 100644 index 2bd7b3c5bf2c8ec7784268ece10e3f35c7c199ee..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SENet154_vd_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SENet154_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True 
-infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SENet/SENet154_vd_train_infer_python.txt b/test_tipc/config/SENet/SENet154_vd_train_infer_python.txt deleted file mode 100644 index 5da5e4bca6b4b8decdef7fb9b4b3c16eefcd0a50..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SENet154_vd_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SENet154_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SENet/SE_ResNeXt101_32x4d_train_amp_infer_python.txt b/test_tipc/config/SENet/SE_ResNeXt101_32x4d_train_amp_infer_python.txt deleted file mode 100644 index 57afa0d0a207fee2a4d5029205eb13d4154a86ae..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNeXt101_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNeXt101_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest 
-train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SENet/SE_ResNeXt101_32x4d_train_infer_python.txt b/test_tipc/config/SENet/SE_ResNeXt101_32x4d_train_infer_python.txt deleted file mode 100644 index 1a61d0c229da34b3ca605dc4019908533c82fc9b..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNeXt101_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNeXt101_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o 
Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SENet/SE_ResNeXt50_32x4d_train_amp_infer_python.txt b/test_tipc/config/SENet/SE_ResNeXt50_32x4d_train_amp_infer_python.txt deleted file mode 100644 index ae0a334de05c91a628099674da99d9bf0d73fc29..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNeXt50_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNeXt50_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SENet/SE_ResNeXt50_32x4d_train_infer_python.txt b/test_tipc/config/SENet/SE_ResNeXt50_32x4d_train_infer_python.txt deleted file mode 100644 index a8bd8b553a4905ebc8e7e7ca9ae22aaaeb52512a..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNeXt50_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNeXt50_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c 
ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SENet/SE_ResNeXt50_vd_32x4d_train_amp_infer_python.txt b/test_tipc/config/SENet/SE_ResNeXt50_vd_32x4d_train_amp_infer_python.txt deleted file mode 100644 index 3cea2c6afdf16bd8e05d5a8e3a7b3d464149402d..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNeXt50_vd_32x4d_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNeXt50_vd_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_vd_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False 
--o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SENet/SE_ResNeXt50_vd_32x4d_train_infer_python.txt b/test_tipc/config/SENet/SE_ResNeXt50_vd_32x4d_train_infer_python.txt deleted file mode 100644 index 1ea4449d5cd250534fad7492ff73daddd7dd3b74..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNeXt50_vd_32x4d_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNeXt50_vd_32x4d -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_vd_32x4d_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SENet/SE_ResNet18_vd_train_amp_infer_python.txt b/test_tipc/config/SENet/SE_ResNet18_vd_train_amp_infer_python.txt deleted file mode 100644 index 872e1ce72d015723c570a19e7ca849631e050e36..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNet18_vd_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNet18_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c 
ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet18_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SENet/SE_ResNet18_vd_train_infer_python.txt b/test_tipc/config/SENet/SE_ResNet18_vd_train_infer_python.txt deleted file mode 100644 index 1a0f673efb9c695ef507532fde00f4a344ac3c87..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNet18_vd_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNet18_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet18_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o 
Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SENet/SE_ResNet34_vd_train_amp_infer_python.txt b/test_tipc/config/SENet/SE_ResNet34_vd_train_amp_infer_python.txt deleted file mode 100644 index 4ac5c06b4334a727bd8981cb6fd54e0af4d5b413..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNet34_vd_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNet34_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet34_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SENet/SE_ResNet34_vd_train_infer_python.txt b/test_tipc/config/SENet/SE_ResNet34_vd_train_infer_python.txt deleted file mode 100644 index 742dd4ba96133338bfce9a56433109eec2e38808..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNet34_vd_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNet34_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False 
-pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet34_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SENet/SE_ResNet50_vd_train_amp_infer_python.txt b/test_tipc/config/SENet/SE_ResNet50_vd_train_amp_infer_python.txt deleted file mode 100644 index d9bc19b579704acce852c651d60d6410596ac497..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNet50_vd_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNet50_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet50_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True 
-null:null -null:null diff --git a/test_tipc/config/SENet/SE_ResNet50_vd_train_infer_python.txt b/test_tipc/config/SENet/SE_ResNet50_vd_train_infer_python.txt deleted file mode 100644 index 9edf4dd24fbd819cd40ef2cebd302f5f9114ae5e..0000000000000000000000000000000000000000 --- a/test_tipc/config/SENet/SE_ResNet50_vd_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SE_ResNet50_vd -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet50_vd_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_swish_train_amp_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_swish_train_amp_infer_python.txt deleted file mode 100644 index 9822328e323c36d56a7fec860f6a55474c446a81..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_swish_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_swish -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null 
-fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_swish_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_swish_train_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_swish_train_infer_python.txt deleted file mode 100644 index da05d8a3af323b1799128ca70124a9b872c180b6..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_swish_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_swish -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_swish_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of 
file diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_25_train_amp_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_25_train_amp_infer_python.txt deleted file mode 100644 index e2e6a4c995ebc9739328a8dc3b83f17a4545633b..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_25_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_25_train_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_25_train_infer_python.txt deleted file mode 100644 index 1dbc646c83e6794bb117dff17e23df4a94577a94..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_25_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x0_25 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_25_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_33_train_amp_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_33_train_amp_infer_python.txt deleted file mode 100644 index 97b59cc65db4a81779a9429961858ecc41790cc3..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_33_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x0_33 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_33_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o 
Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_33_train_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_33_train_infer_python.txt deleted file mode 100644 index 3c429924af3d589088ab7ee57a6052d6b241d6c9..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_33_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x0_33 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_33_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_5_train_amp_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_5_train_amp_infer_python.txt deleted file mode 100644 index 4dc126ab690428a4ba9af8cda59ffaf445dc0021..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 
-o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_5_train_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_5_train_infer_python.txt deleted file mode 100644 index ac890e7ab2ec563ecc53155a23816f8dc7eae373..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x0_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x0_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== 
-random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_0_train_amp_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_0_train_amp_infer_python.txt deleted file mode 100644 index e738f37653716ca6a06764c80645ee0e200a8ce7..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_0_train_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_0_train_infer_python.txt deleted file mode 100644 index 9b3cc1d5af028a627f99fccdb7a4a8a3dc11e2cc..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_0_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -to_static_train:-o 
Global.to_static=True -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:256|1536 -fp_items:fp32 -epoch:2 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_5_train_amp_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_5_train_amp_infer_python.txt deleted file mode 100644 index 7317c4780f60296247cf96d2753e7e761328fd39..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_5_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x1_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml 
--o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_5_train_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_5_train_infer_python.txt deleted file mode 100644 index 3564be4dcd9e3ad9cd445e89373933a58ccff936..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x1_5_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x1_5 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_5_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x2_0_train_amp_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x2_0_train_amp_infer_python.txt deleted file mode 100644 index 020b44b68fd5e6ae25270097a6ce5f3126ae5dd5..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x2_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x2_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest 
-train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x2_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/ShuffleNet/ShuffleNetV2_x2_0_train_infer_python.txt b/test_tipc/config/ShuffleNet/ShuffleNetV2_x2_0_train_infer_python.txt deleted file mode 100644 index 87b2b340c8044e95c05cbcb485860338811050c9..0000000000000000000000000000000000000000 --- a/test_tipc/config/ShuffleNet/ShuffleNetV2_x2_0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ShuffleNetV2_x2_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x2_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o 
Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SqueezeNet/SqueezeNet1_0_train_amp_infer_python.txt b/test_tipc/config/SqueezeNet/SqueezeNet1_0_train_amp_infer_python.txt deleted file mode 100644 index 33746bcd1b837a67dcc69364a93d7353fcf0cd95..0000000000000000000000000000000000000000 --- a/test_tipc/config/SqueezeNet/SqueezeNet1_0_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SqueezeNet1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SqueezeNet/SqueezeNet1_0_train_infer_python.txt b/test_tipc/config/SqueezeNet/SqueezeNet1_0_train_infer_python.txt deleted file mode 100644 index 274d759db7cfbed50c584a367160280a38820940..0000000000000000000000000000000000000000 --- a/test_tipc/config/SqueezeNet/SqueezeNet1_0_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SqueezeNet1_0 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py 
-c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_0_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SqueezeNet/SqueezeNet1_1_train_amp_infer_python.txt b/test_tipc/config/SqueezeNet/SqueezeNet1_1_train_amp_infer_python.txt deleted file mode 100644 index 943a05e46808bbc60acb956899e710c2fba9710c..0000000000000000000000000000000000000000 --- a/test_tipc/config/SqueezeNet/SqueezeNet1_1_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SqueezeNet1_1 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_1_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o 
Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SqueezeNet/SqueezeNet1_1_train_infer_python.txt b/test_tipc/config/SqueezeNet/SqueezeNet1_1_train_infer_python.txt deleted file mode 100644 index 06ad8c4cce46d41ac9577d87cb763b175f303bf5..0000000000000000000000000000000000000000 --- a/test_tipc/config/SqueezeNet/SqueezeNet1_1_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SqueezeNet1_1 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_1_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_amp_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_amp_infer_python.txt deleted file mode 100644 index d4e0a4122e027329a88c3890e5adea5f684f8123..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_base_patch4_window12_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null 
-## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window12_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_infer_python.txt deleted file mode 100644 index 45e4d8534f3e172899d619bcfcb19cdf1916dc65..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_base_patch4_window12_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window12_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_amp_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_amp_infer_python.txt deleted file mode 100644 index 8e686d07753b1ed2a9f441f7417908196518e24a..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_base_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_infer_python.txt 
b/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_infer_python.txt deleted file mode 100644 index 4270e32ec27ca1cd233da7e20664b1d46575f3cf..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_base_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_amp_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_amp_infer_python.txt deleted file mode 100644 index 6ed1e29b0fb27171f4745c223e471e0b9d9a5153..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_large_patch4_window12_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c 
ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_infer_python.txt deleted file mode 100644 index 737c4d3bd7bfe3537170226361ce775a73e4f88d..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_large_patch4_window12_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_amp_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_amp_infer_python.txt deleted file mode 100644 index 46434bdb9c7b9023eec8a8a426ea1ec9e9a21cf6..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_large_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window7_224_22kto1k_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git 
a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_infer_python.txt deleted file mode 100644 index 7c45933a977d42434982d12d261bd82502d493b5..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_large_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window7_224_22kto1k_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_amp_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_amp_infer_python.txt deleted file mode 100644 index 0475fa099927aaccdfd3c039d06606e173f445a5..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_small_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## 
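
All of the deleted files above share the same TIPC key:value layout: one `key:value` pair per line, `##` as a section separator, `null` for a disabled feature, and `|` separating alternative values that the harness sweeps over (e.g. `gpu_list:0|0,1` or `-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120`). A minimal parser sketch under that informal grammar — the real harness is the shell tooling under test_tipc/, so the `parse_tipc_config` name and dict layout here are illustrative only:

# Illustrative sketch only: read a TIPC config's key:value lines into a
# dict, keeping "|"-separated alternatives as lists of strings.
def parse_tipc_config(path):
    params = {}
    with open(path) as f:
        for raw in f:
            line = raw.strip()
            # "##" lines and "====...====" banners are separators, not params
            if not line or line == "##" or line.startswith("==="):
                continue
            key, sep, value = line.partition(":")
            if not sep:
                continue  # defensively skip lines without a ":"
            # "|" marks alternatives the harness sweeps over, e.g. "0|0,1"
            params[key] = value.split("|") if "|" in value else value
    return params

# e.g. parse_tipc_config("ShuffleNetV2_x1_0_train_infer_python.txt")["gpu_list"]
# -> ["0", "0,1"]
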
-trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_small_patch4_window7_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_infer_python.txt deleted file mode 100644 index 1c862375db536c57eb5d648277b313f369f8a301..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_small_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_small_patch4_window7_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_amp_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_amp_infer_python.txt deleted file mode 100644 index aabba1e440395287fd4d1711a8020a72ac4f4ece..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_tiny_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_infer_python.txt deleted file mode 100644 index 
8d8d4b314d03dc98d095f137626592a33450bab2..0000000000000000000000000000000000000000 --- a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:SwinTransformer_tiny_patch4_window7_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:64|104 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/TNT/TNT_small_train_amp_infer_python.txt b/test_tipc/config/TNT/TNT_small_train_amp_infer_python.txt deleted file mode 100644 index 1966ac184caecd85cb1475bfddad368fbb1ac085..0000000000000000000000000000000000000000 --- a/test_tipc/config/TNT/TNT_small_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:TNT_small -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c 
ppcls/configs/ImageNet/TNT/TNT_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_small_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/TNT/TNT_small_train_infer_python.txt b/test_tipc/config/TNT/TNT_small_train_infer_python.txt deleted file mode 100644 index 1e99ce2d6d5f9da79c6c57e75e88f8b1d518101d..0000000000000000000000000000000000000000 --- a/test_tipc/config/TNT/TNT_small_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:TNT_small -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_small_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null 
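
Each `--o Global.<flag>:a|b` entry in the infer_params sections above defines one axis of the inference test grid, and the harness runs `python/predict_cls.py -c configs/inference_cls.yaml` once per combination. A rough sketch of that expansion, assuming a plain dict of flag alternatives — the `build_infer_commands` helper and its exact override syntax are hypothetical, not part of the repo:

import itertools

# Hypothetical sketch: expand "|"-separated flag alternatives into the
# grid of predict_cls.py command lines a TIPC run would cover.
def build_infer_commands(base_cmd, flag_alternatives):
    keys = list(flag_alternatives)
    for combo in itertools.product(*(flag_alternatives[k] for k in keys)):
        overrides = " ".join(f"-o {k}={v}" for k, v in zip(keys, combo))
        yield f"{base_cmd} {overrides}"

base = "python3.7 python/predict_cls.py -c configs/inference_cls.yaml"
grid = {
    "Global.use_gpu": ["True", "False"],  # from Global.use_gpu:True|False
    "Global.batch_size": ["1", "16"],     # from Global.batch_size:1|16
}
for cmd in build_infer_commands(base, grid):
    print(cmd)  # 4 command lines: 2 use_gpu values x 2 batch sizes
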
-===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Twins/alt_gvt_base_train_amp_infer_python.txt b/test_tipc/config/Twins/alt_gvt_base_train_amp_infer_python.txt deleted file mode 100644 index e14b8ddc9ab1fc9a1605057afa8c3905d47814f9..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/alt_gvt_base_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:alt_gvt_base -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_base_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Twins/alt_gvt_base_train_infer_python.txt b/test_tipc/config/Twins/alt_gvt_base_train_infer_python.txt deleted file mode 100644 index ef4b566c4627de9f9aee6fe7412ca6646e77ef7a..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/alt_gvt_base_train_infer_python.txt +++ /dev/null @@ -1,60 +0,0 @@ -===========================train_params=========================== -model_name:alt_gvt_base -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
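
The `random_infer_input:[{float32,[3,224,224]}]` entries that close most of these files describe the dummy input fed to the exported model during benchmark-only inference: a single float32 tensor of shape (3, 224, 224), or (3, 384, 384) for the 384-resolution SwinTransformer configs. A sketch of how such a spec could be materialized with NumPy, assuming the batch dimension is prepended by the caller — the `make_random_input` name is illustrative:

import numpy as np

# Illustrative: turn a spec like "{float32,[3,224,224]}" into a random
# batch for benchmark inference, where no real images are needed.
def make_random_input(dtype, shape, batch_size=1):
    return np.random.random([batch_size] + list(shape)).astype(dtype)

x = make_random_input("float32", [3, 224, 224], batch_size=16)
print(x.shape, x.dtype)  # (16, 3, 224, 224) float32
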
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_base_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================train_benchmark_params========================== -batch_size:64|144 -fp_items:fp32 -epoch:1 ---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/Twins/alt_gvt_large_train_amp_infer_python.txt b/test_tipc/config/Twins/alt_gvt_large_train_amp_infer_python.txt deleted file mode 100644 index 436bb75dabd38c2d66ff93cac9244467bd2e7525..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/alt_gvt_large_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:alt_gvt_large -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_large_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 
--o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Twins/alt_gvt_large_train_infer_python.txt b/test_tipc/config/Twins/alt_gvt_large_train_infer_python.txt deleted file mode 100644 index 271e0c2c079811c19df88f42e00454d03192772b..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/alt_gvt_large_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:alt_gvt_large -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_large_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Twins/alt_gvt_small_train_amp_infer_python.txt b/test_tipc/config/Twins/alt_gvt_small_train_amp_infer_python.txt deleted file mode 100644 index f82314ba6e1d7283b0e5f49da739e332768d8a74..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/alt_gvt_small_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:alt_gvt_small -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o 
DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_small_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Twins/alt_gvt_small_train_infer_python.txt b/test_tipc/config/Twins/alt_gvt_small_train_infer_python.txt deleted file mode 100644 index 26307ad1279df64e56a7647ecdafd7041708b3ac..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/alt_gvt_small_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:alt_gvt_small -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_small_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null 
-===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Twins/pcpvt_base_train_amp_infer_python.txt b/test_tipc/config/Twins/pcpvt_base_train_amp_infer_python.txt deleted file mode 100644 index cf85959c01a2c50f5d2636585d388d3a3fa966a2..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/pcpvt_base_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:pcpvt_base -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_base_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Twins/pcpvt_base_train_infer_python.txt b/test_tipc/config/Twins/pcpvt_base_train_infer_python.txt deleted file mode 100644 index a1b3c0f65f487b7b96865df412de58b13c689fcb..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/pcpvt_base_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:pcpvt_base -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## 
-===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_base_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Twins/pcpvt_large_train_amp_infer_python.txt b/test_tipc/config/Twins/pcpvt_large_train_amp_infer_python.txt deleted file mode 100644 index 622a62991c3c3332b18ac14238c3320b1ee7441d..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/pcpvt_large_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:pcpvt_large -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_large_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Twins/pcpvt_large_train_infer_python.txt 
b/test_tipc/config/Twins/pcpvt_large_train_infer_python.txt deleted file mode 100644 index 275801c5edd8814e3ec54d35e82389de2c7afcd5..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/pcpvt_large_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:pcpvt_large -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_large_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Twins/pcpvt_small_train_amp_infer_python.txt b/test_tipc/config/Twins/pcpvt_small_train_amp_infer_python.txt deleted file mode 100644 index 6d84c8f5eb10a53ea052fa7af303703b59235750..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/pcpvt_small_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:pcpvt_small -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c 
ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_small_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Twins/pcpvt_small_train_infer_python.txt b/test_tipc/config/Twins/pcpvt_small_train_infer_python.txt deleted file mode 100644 index 95c4d54c3dcad9a98e446088f68f4a788474aca1..0000000000000000000000000000000000000000 --- a/test_tipc/config/Twins/pcpvt_small_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:pcpvt_small -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_small_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/VAN/VAN_tiny_train_infer_python.txt b/test_tipc/config/VAN/VAN_tiny_train_infer_python.txt deleted file mode 100644 index 7d884d27ea666fa89fddd9487909d1371436e760..0000000000000000000000000000000000000000 --- 
a/test_tipc/config/VAN/VAN_tiny_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:VAN_tiny -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VAN/VAN_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VAN/VAN_tiny.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VAN/VAN_tiny.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -inference_dir:null -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.5,0.5,0.5] -o PreProcess.transform_ops.2.NormalizeImage.std=[0.5,0.5,0.5] --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/VGG/VGG11_train_amp_infer_python.txt b/test_tipc/config/VGG/VGG11_train_amp_infer_python.txt deleted file mode 100644 index cf0fe60c48e929ffbe4e188c54c557180dabf649..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG11_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:VGG11 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: 
-norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG11_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VGG/VGG11_train_infer_python.txt b/test_tipc/config/VGG/VGG11_train_infer_python.txt deleted file mode 100644 index ed88fa4626640895b7ace894986513290f4cf344..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG11_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:VGG11 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG11_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/VGG/VGG13_train_amp_infer_python.txt b/test_tipc/config/VGG/VGG13_train_amp_infer_python.txt deleted file mode 100644 index 083a690c264a3441bafce83e4f9e9db398f17c29..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG13_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:VGG13 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG13_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VGG/VGG13_train_infer_python.txt b/test_tipc/config/VGG/VGG13_train_infer_python.txt deleted file mode 100644 index bc04ce858352e82de2e1c7ca45e2121c666002fa..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG13_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:VGG13 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG13_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o 
Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/VGG/VGG16_train_amp_infer_python.txt b/test_tipc/config/VGG/VGG16_train_amp_infer_python.txt deleted file mode 100644 index 30b422af895cbec8cbc89f05090e058609e04342..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG16_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:VGG16 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG16_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VGG/VGG16_train_infer_python.txt b/test_tipc/config/VGG/VGG16_train_infer_python.txt deleted file mode 100644 index ff913c5352087f84289d0275c80763e30760cb00..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG16_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:VGG16 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o 
DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG16_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/VGG/VGG19_train_amp_infer_python.txt b/test_tipc/config/VGG/VGG19_train_amp_infer_python.txt deleted file mode 100644 index ccbce1489852ce36701c7690abeaffe3e17221c5..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG19_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:VGG19 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG19_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o 
Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VGG/VGG19_train_infer_python.txt b/test_tipc/config/VGG/VGG19_train_infer_python.txt deleted file mode 100644 index 9cd5ae5c691ab46690de7e0740c192bd602d4f84..0000000000000000000000000000000000000000 --- a/test_tipc/config/VGG/VGG19_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:VGG19 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG19_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/VisionTransformer/ViT_base_patch16_224_train_amp_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_base_patch16_224_train_amp_infer_python.txt deleted file mode 100644 index 74155b1f9215aebdb98ceb2f25dd430b462cfa82..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_base_patch16_224_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ViT_base_patch16_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null 
-fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VisionTransformer/ViT_base_patch16_224_train_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_base_patch16_224_train_infer_python.txt deleted file mode 100644 index bb73849e61cd2a8285d91f3db71c0796a510e3e7..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_base_patch16_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ViT_base_patch16_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null 
-===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/VisionTransformer/ViT_base_patch16_384_train_amp_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_base_patch16_384_train_amp_infer_python.txt deleted file mode 100644 index aa23031bf79f193c060451e7615c670f31f834b0..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_base_patch16_384_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ViT_base_patch16_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VisionTransformer/ViT_base_patch16_384_train_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_base_patch16_384_train_infer_python.txt deleted file mode 100644 index 20d3b73b94b59ad86878ad26ad1b9e10e401931c..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_base_patch16_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ViT_base_patch16_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c 
ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/config/VisionTransformer/ViT_base_patch32_384_train_amp_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_base_patch32_384_train_amp_infer_python.txt deleted file mode 100644 index a5370b670ff3bcc0b2020f52f1e0ad8bbfd10850..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_base_patch32_384_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ViT_base_patch32_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null 
-pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch32_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VisionTransformer/ViT_base_patch32_384_train_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_base_patch32_384_train_infer_python.txt deleted file mode 100644 index f11d689651ba7f043c8c77eb9563bdc6d5b62320..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_base_patch32_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ViT_base_patch32_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch32_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/config/VisionTransformer/ViT_large_patch16_224_train_amp_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_large_patch16_224_train_amp_infer_python.txt deleted file mode 100644 index 
b86428b9fe077bcaeeff2ff6aa35161c977d141a..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_large_patch16_224_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ViT_large_patch16_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VisionTransformer/ViT_large_patch16_224_train_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_large_patch16_224_train_infer_python.txt deleted file mode 100644 index 0e07b743ec579e1a8a42c47eda89cc305aff67f4..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_large_patch16_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ViT_large_patch16_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml 
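Aside on the config format: every `-o` entry in these files is a dotted-key override applied on top of the YAML passed via `-c`. A minimal sketch of how such overrides can be applied to a loaded nested-dict config; `apply_override` is an illustrative helper, not the actual ppcls/TIPC implementation:

    import yaml

    def apply_override(cfg, dotted_key, value):
        # Walk a "Global.seed"-style key down the nested dict and set the leaf.
        # yaml.safe_load coerces "1234" -> 1234, "False" -> False, "null" -> None.
        node = cfg
        keys = dotted_key.split(".")
        for k in keys[:-1]:
            node = node.setdefault(k, {})
        node[keys[-1]] = yaml.safe_load(value)

    with open("ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml") as f:
        cfg = yaml.safe_load(f)
    apply_override(cfg, "Global.seed", "1234")
    apply_override(cfg, "DataLoader.Train.sampler.shuffle", "False")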
-null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/VisionTransformer/ViT_large_patch16_384_train_amp_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_large_patch16_384_train_amp_infer_python.txt deleted file mode 100644 index c733d7e8274fc08dd782351397c65884a63d8763..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_large_patch16_384_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ViT_large_patch16_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o 
Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VisionTransformer/ViT_large_patch16_384_train_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_large_patch16_384_train_infer_python.txt deleted file mode 100644 index 06555842e7e2c37e70749836d9232348ecb1c18a..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_large_patch16_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ViT_large_patch16_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/config/VisionTransformer/ViT_large_patch32_384_train_amp_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_large_patch32_384_train_amp_infer_python.txt deleted file mode 100644 index f6ead75b81e298c9684d2b1847356203afce3f26..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_large_patch32_384_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ViT_large_patch32_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train 
-amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch32_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VisionTransformer/ViT_large_patch32_384_train_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_large_patch32_384_train_infer_python.txt deleted file mode 100644 index e513814b7a995ce13805283841769005ed7376c6..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_large_patch32_384_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:ViT_large_patch32_384 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:2 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch32_384_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle 
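In the inference override block that follows (and in entries such as `gpu_list:0|0,1` and `-o Global.batch_size:1|16`), a `|` separates the alternative values a test run sweeps over. Assuming each `|`-separated field is swept independently, the full command matrix is a cartesian product; a sketch (`expand` is illustrative, not the test_tipc harness itself):

    from itertools import product

    def expand(options):
        # Each value holds "|"-separated alternatives; enumerate every combination.
        keys = list(options)
        alternatives = [options[k].split("|") for k in keys]
        return [dict(zip(keys, combo)) for combo in product(*alternatives)]

    sweeps = expand({
        "Global.use_gpu": "True|False",
        "Global.cpu_num_threads": "1|6",
        "Global.batch_size": "1|16",
    })
    print(len(sweeps))  # 8 combinations, each one predict_cls.py invocation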
-inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/config/VisionTransformer/ViT_small_patch16_224_train_amp_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_small_patch16_224_train_amp_infer_python.txt deleted file mode 100644 index 34fcaf290cddab226ee70b4be3caae2c10739f7b..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_small_patch16_224_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:ViT_small_patch16_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_small_patch16_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/VisionTransformer/ViT_small_patch16_224_train_infer_python.txt b/test_tipc/config/VisionTransformer/ViT_small_patch16_224_train_infer_python.txt deleted file mode 100644 index 2098dcf1064bd4b35582e611117adab19bae2b75..0000000000000000000000000000000000000000 --- a/test_tipc/config/VisionTransformer/ViT_small_patch16_224_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ 
-===========================train_params=========================== -model_name:ViT_small_patch16_224 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_small_patch16_224_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/Xception/Xception41_deeplab_train_amp_infer_python.txt b/test_tipc/config/Xception/Xception41_deeplab_train_amp_infer_python.txt deleted file mode 100644 index 325e0ad83092e10098496fc3224ff444aa291298..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception41_deeplab_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:Xception41_deeplab -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -null:null -## -===========================infer_params========================== --o 
Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_deeplab_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Xception/Xception41_deeplab_train_infer_python.txt b/test_tipc/config/Xception/Xception41_deeplab_train_infer_python.txt deleted file mode 100644 index b4b97c325b4664b8f56cca28a9f7f47209675b1a..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception41_deeplab_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:Xception41_deeplab -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_deeplab_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/config/Xception/Xception41_train_amp_infer_python.txt 
b/test_tipc/config/Xception/Xception41_train_amp_infer_python.txt deleted file mode 100644 index f4569e71a7d75aa1dc2a9fd09c923ac68a4aec7c..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception41_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:Xception41 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Xception/Xception41_train_infer_python.txt b/test_tipc/config/Xception/Xception41_train_infer_python.txt deleted file mode 100644 index 14f235d9dd70b8b279a88522ad263d0daad2a8ab..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception41_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:Xception41 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml 
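Each of these files is a flat `key:value` list, split into sections by `===...===` banner lines, with `##` as a block separator and `null:null` as a placeholder entry. A minimal reader for the layout might look like this (a sketch only; the actual test_tipc runner parses these files with shell scripts, and duplicate placeholder keys collapse here):

    def parse_tipc(path):
        # Return {section_name: {key: value}} keyed by the banner titles.
        sections, current = {}, None
        with open(path) as f:
            for raw in f:
                line = raw.strip()
                if not line or line == "##":
                    continue  # "##" only separates blocks
                if line.startswith("==="):
                    current = line.strip("=")  # e.g. "train_params"
                    sections[current] = {}
                    continue
                key, _, value = line.partition(":")
                sections.setdefault(current, {})[key] = value
        return sections

    params = parse_tipc("test_tipc/configs/AlexNet/AlexNet_train_infer_python.txt")
    print(params["train_params"]["model_name"])  # AlexNet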
-null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/config/Xception/Xception65_deeplab_train_amp_infer_python.txt b/test_tipc/config/Xception/Xception65_deeplab_train_amp_infer_python.txt deleted file mode 100644 index 709423e618cf263d739c9b0eb797893b917c0f93..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception65_deeplab_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:Xception65_deeplab -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_deeplab_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o 
Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Xception/Xception65_deeplab_train_infer_python.txt b/test_tipc/config/Xception/Xception65_deeplab_train_infer_python.txt deleted file mode 100644 index c07b8b85082cf7145e67b59da618e957e57f1c29..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception65_deeplab_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:Xception65_deeplab -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_deeplab_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/config/Xception/Xception65_train_amp_infer_python.txt b/test_tipc/config/Xception/Xception65_train_amp_infer_python.txt deleted file mode 100644 index ed1d042d3a2cee34b1e03ef834e3084d6ab2d006..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception65_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:Xception65 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o 
DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Xception/Xception65_train_infer_python.txt b/test_tipc/config/Xception/Xception65_train_infer_python.txt deleted file mode 100644 index 3397ef169ecade06b853aeb5a66b4c29491c4606..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception65_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:Xception65 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o 
Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/config/Xception/Xception71_train_amp_infer_python.txt b/test_tipc/config/Xception/Xception71_train_amp_infer_python.txt deleted file mode 100644 index 60867581f598b68c487673b71f47a94df1a318f0..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception71_train_amp_infer_python.txt +++ /dev/null @@ -1,52 +0,0 @@ -===========================train_params=========================== -model_name:Xception71 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:amp_train -amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception71_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null diff --git a/test_tipc/config/Xception/Xception71_train_infer_python.txt b/test_tipc/config/Xception/Xception71_train_infer_python.txt deleted file mode 100644 index 3a02806e48becba1b0c50995fc42943b42d25eb8..0000000000000000000000000000000000000000 --- a/test_tipc/config/Xception/Xception71_train_infer_python.txt +++ /dev/null @@ -1,54 +0,0 @@ -===========================train_params=========================== -model_name:Xception71 -python:python3.7 -gpu_list:0|0,1 --o Global.device:gpu --o Global.auto_cast:null --o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 --o Global.output_dir:./output/ --o DataLoader.Train.sampler.batch_size:8 --o Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./dataset/ILSVRC2012/val -null:null -## -trainer:norm_train -norm_train:tools/train.py -c 
ppcls/configs/ImageNet/Xception/Xception71.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml -null:null -## -===========================infer_params========================== --o Global.save_inference_dir:./inference --o Global.pretrained_model: -norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml -quant_export:null -fpgm_export:null -distill_export:null -kl_quant:null -export2:null -pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception71_pretrained.pdparams -infer_model:../inference/ -infer_export:True -infer_quant:Fasle -inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 --o Global.use_gpu:True|False --o Global.enable_mkldnn:True|False --o Global.cpu_num_threads:1|6 --o Global.batch_size:1|16 --o Global.use_tensorrt:True|False --o Global.use_fp16:True|False --o Global.inference_model_dir:../inference --o Global.infer_imgs:../dataset/ILSVRC2012/val --o Global.save_log_path:null --o Global.benchmark:True -null:null -null:null -===========================infer_benchmark_params========================== -random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/configs/AlexNet/AlexNet_train_amp_infer_python.txt b/test_tipc/configs/AlexNet/AlexNet_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c4a085d28b361709a679752e1f87b5e0f582eff4 --- /dev/null +++ b/test_tipc/configs/AlexNet/AlexNet_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:AlexNet +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o 
Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/AlexNet/AlexNet_train_infer_python.txt b/test_tipc/configs/AlexNet/AlexNet_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..990b67f8e3010f50f092e3f02866740e88be0577 --- /dev/null +++ b/test_tipc/configs/AlexNet/AlexNet_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:AlexNet +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/AlexNet/AlexNet.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/CSPNet/CSPDarkNet53_train_amp_infer_python.txt b/test_tipc/configs/CSPNet/CSPDarkNet53_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d1e48a6743f3d9e0d8a823ed1e5fcd98c9b8b813 --- /dev/null +++ b/test_tipc/configs/CSPNet/CSPDarkNet53_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:CSPDarkNet53 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train 
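The amp_train command on the next line passes `-o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2`. Roughly, these settings correspond to Paddle's AMP primitives as sketched below; the exact wiring inside the ppcls trainer may differ, the stand-in model is arbitrary, and a GPU build of PaddlePaddle is assumed:

    import paddle

    model = paddle.vision.models.resnet18()  # stand-in model for the sketch
    opt = paddle.optimizer.Momentum(learning_rate=0.01,
                                    parameters=model.parameters())

    # AMP.level=O2: cast parameters to float16 where it is safe to do so.
    model, opt = paddle.amp.decorate(models=model, optimizers=opt, level="O2")

    # AMP.scale_loss=128 and AMP.use_dynamic_loss_scaling=True map to GradScaler.
    scaler = paddle.amp.GradScaler(init_loss_scale=128,
                                   use_dynamic_loss_scaling=True)

    x = paddle.randn([8, 3, 224, 224])
    with paddle.amp.auto_cast(level="O2"):
        loss = model(x).mean()
    scaled = scaler.scale(loss)   # scale before backward to avoid fp16 underflow
    scaled.backward()
    scaler.minimize(opt, scaled)  # unscale, skip the step on inf/nan, update scale
    opt.clear_grad()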
+amp_train:tools/train.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSPDarkNet53_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=288 -o PreProcess.transform_ops.1.CropImage.size=256 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/CSPNet/CSPDarkNet53_train_infer_python.txt b/test_tipc/configs/CSPNet/CSPDarkNet53_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e9869d6f40f8cdef6c8741ff7d9596b195601c52 --- /dev/null +++ b/test_tipc/configs/CSPNet/CSPDarkNet53_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:CSPDarkNet53 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSPNet/CSPDarkNet53.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSPDarkNet53_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o 
PreProcess.transform_ops.0.ResizeImage.resize_short=288 -o PreProcess.transform_ops.1.CropImage.size=256 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,256,256]}] \ No newline at end of file diff --git a/test_tipc/configs/CSWinTransformer/CSWinTransformer_base_224_train_infer_python.txt b/test_tipc/configs/CSWinTransformer/CSWinTransformer_base_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..198cdeef4462b4f94cedd12464b2499dca29efad --- /dev/null +++ b/test_tipc/configs/CSWinTransformer/CSWinTransformer_base_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:CSWinTransformer_base_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_base_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/CSWinTransformer/CSWinTransformer_base_384_train_infer_python.txt b/test_tipc/configs/CSWinTransformer/CSWinTransformer_base_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2e3957ec2f15bf65aabb2a50be4dd3fb8e8786ba --- /dev/null +++ 
b/test_tipc/configs/CSWinTransformer/CSWinTransformer_base_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:CSWinTransformer_base_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_base_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,384,384]}] diff --git a/test_tipc/configs/CSWinTransformer/CSWinTransformer_large_224_train_infer_python.txt b/test_tipc/configs/CSWinTransformer/CSWinTransformer_large_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1ba81846c001b6d1c046f24bd9b053d2602a1b9 --- /dev/null +++ b/test_tipc/configs/CSWinTransformer/CSWinTransformer_large_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:CSWinTransformer_large_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null 
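The infer_benchmark_params entries in these files, e.g. `random_infer_input:[{float32,[3,384,384]}]`, declare the dtype and CHW shape of the synthetic input used for benchmark runs. A sketch of turning that spec into a dummy batch (`make_random_input` is illustrative; the real harness may build its inputs differently):

    import re
    import numpy as np

    def make_random_input(spec, batch_size=1):
        # Parse "[{float32,[3,384,384]}]" into a dtype and shape, then
        # build a random batch of that shape.
        dtype, dims = re.match(r"\[\{(\w+),\[([\d,]+)\]\}\]", spec).groups()
        shape = [batch_size] + [int(d) for d in dims.split(",")]
        return np.random.rand(*shape).astype(dtype)

    x = make_random_input("[{float32,[3,384,384]}]", batch_size=16)
    print(x.shape, x.dtype)  # (16, 3, 384, 384) float32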
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/CSWinTransformer/CSWinTransformer_large_384_train_infer_python.txt b/test_tipc/configs/CSWinTransformer/CSWinTransformer_large_384_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aa58c136b86fb452245e1123d13df0564f1a42fb
--- /dev/null
+++ b/test_tipc/configs/CSWinTransformer/CSWinTransformer_large_384_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:CSWinTransformer_large_384
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:4
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_large_384_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,384,384]}]
diff --git a/test_tipc/configs/CSWinTransformer/CSWinTransformer_small_224_train_infer_python.txt b/test_tipc/configs/CSWinTransformer/CSWinTransformer_small_224_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e33402d35c178198ece466bc7a189209e46ca41c
--- /dev/null
+++ b/test_tipc/configs/CSWinTransformer/CSWinTransformer_small_224_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:CSWinTransformer_small_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_small_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/CSWinTransformer/CSWinTransformer_tiny_224_train_infer_python.txt b/test_tipc/configs/CSWinTransformer/CSWinTransformer_tiny_224_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b8b95f2f43a32e017e7535d5a4bf84347d63e994
--- /dev/null
+++ b/test_tipc/configs/CSWinTransformer/CSWinTransformer_tiny_224_train_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:CSWinTransformer_tiny_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/CSWinTransformer_tiny_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/DLA/DLA102_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA102_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..794a10bb9d51a82dffd4200c0a90b825ae9e7d94
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA102_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA102
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA102_train_infer_python.txt b/test_tipc/configs/DLA/DLA102_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6de8b653d76345668836bc576d96778024b80fd1
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA102_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA102
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA102x2_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA102x2_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c4df9e4f383c0889909c70248bc43e87b7e4595d
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA102x2_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA102x2
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x2_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA102x2_train_infer_python.txt b/test_tipc/configs/DLA/DLA102x2_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eadc385aea11b804851314fe899459c3e58ee53d
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA102x2_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA102x2
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x2.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x2_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA102x_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA102x_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e1251f5699d5abea774e380e602715c9d0cecb40
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA102x_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA102x
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA102x_train_infer_python.txt b/test_tipc/configs/DLA/DLA102x_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fc269e225fbc5a97672e634e7fedfdc1e149422b
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA102x_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA102x
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA102x.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA102x_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA169_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA169_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8e6e92549a535e6ef9ad08c5cb516658518e0f95
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA169_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA169
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA169_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA169_train_infer_python.txt b/test_tipc/configs/DLA/DLA169_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e01a74cc3928be9bbc44d14bedc9b30d8231062d
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA169_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA169
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA169.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA169_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA34_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA34_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fd2645506f92b23f3cac7f19d1a72f15fd3c0c6e
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA34_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA34
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA34_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA34_train_infer_python.txt b/test_tipc/configs/DLA/DLA34_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..02fa85932c539c7b28de277fbbfa4933b64d5334
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA34_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA34
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA34.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA34_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA46_c_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA46_c_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f51270db1523f23239c3398f768020265121244b
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA46_c_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA46_c
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46_c_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA46_c_train_infer_python.txt b/test_tipc/configs/DLA/DLA46_c_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..63dcd3c1f8a8548db62484a22a4f3e06edbf9c4a
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA46_c_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA46_c
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46_c.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46_c_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA46x_c_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA46x_c_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d355e917b8ee0c3fdbb569c43b1f8b963310922b
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA46x_c_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA46x_c
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46x_c_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA46x_c_train_infer_python.txt b/test_tipc/configs/DLA/DLA46x_c_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d11ced7979e45c70b3d3a726057a0d28cc6915d7
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA46x_c_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA46x_c
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA46x_c.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA46x_c_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA60_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA60_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..328980d0d6d3a8be154712ddbdcdbca3c0159cdd
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA60_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA60
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA60_train_infer_python.txt b/test_tipc/configs/DLA/DLA60_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1a214c1954e2e41f9869ccb70ee8bfe598afee80
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA60_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA60
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/DLA/DLA60x_c_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA60x_c_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..25934061117571dba16dfa6bcb3ed8e8bd15e66e
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA60x_c_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:DLA60x_c
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/DLA/DLA60x_c_train_infer_python.txt b/test_tipc/configs/DLA/DLA60x_c_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8cff0a4ff39594c3b5c1681b05c2751930368640
--- /dev/null
+++ b/test_tipc/configs/DLA/DLA60x_c_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:DLA60x_c
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x_c.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_c_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DLA/DLA60x_train_amp_infer_python.txt b/test_tipc/configs/DLA/DLA60x_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b47ade74f9f8c6aaed2e93797ce59b52bc11ae0 --- /dev/null +++ b/test_tipc/configs/DLA/DLA60x_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DLA60x +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o 
Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DLA/DLA60x_train_infer_python.txt b/test_tipc/configs/DLA/DLA60x_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..37bd0bbb38e2aa34aa1294449eab16faa5f1e27f --- /dev/null +++ b/test_tipc/configs/DLA/DLA60x_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DLA60x +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DLA/DLA60x.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DLA60x_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DPN/DPN107_train_amp_infer_python.txt b/test_tipc/configs/DPN/DPN107_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..595569ef41df77454a5cbeb3fa070ea2e3e2437c --- /dev/null +++ b/test_tipc/configs/DPN/DPN107_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DPN107 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -o 
Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DPN/DPN107_train_infer_python.txt b/test_tipc/configs/DPN/DPN107_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..110bf85dd71fec781168a477acabb8dae3a40f0a --- /dev/null +++ b/test_tipc/configs/DPN/DPN107_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DPN107 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN107.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN107_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o 
Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DPN/DPN131_train_amp_infer_python.txt b/test_tipc/configs/DPN/DPN131_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c19861de923e6d04158eec27c836b544f4ded60c --- /dev/null +++ b/test_tipc/configs/DPN/DPN131_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DPN131 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DPN/DPN131_train_infer_python.txt b/test_tipc/configs/DPN/DPN131_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..15fc1e1808e96ccc6dff5e76925299e7637d6568 --- /dev/null +++ b/test_tipc/configs/DPN/DPN131_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DPN131 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null 
+distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN131.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN131_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DPN/DPN68_train_amp_infer_python.txt b/test_tipc/configs/DPN/DPN68_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..73a1939288616657728e26911e556282c65b09c1 --- /dev/null +++ b/test_tipc/configs/DPN/DPN68_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DPN68 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN68_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git 
a/test_tipc/configs/DPN/DPN68_train_infer_python.txt b/test_tipc/configs/DPN/DPN68_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..43b3f3ac8baa3121c9d24b016425abe16ec4377e --- /dev/null +++ b/test_tipc/configs/DPN/DPN68_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DPN68 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN68.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN68_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DPN/DPN92_train_amp_infer_python.txt b/test_tipc/configs/DPN/DPN92_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2b8702a71b6568474662245685fc98b3025bef9a --- /dev/null +++ b/test_tipc/configs/DPN/DPN92_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DPN92 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## 
+===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN92_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DPN/DPN92_train_infer_python.txt b/test_tipc/configs/DPN/DPN92_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..667c50676a744c72e4ea28ae4bddab66a338b8a7 --- /dev/null +++ b/test_tipc/configs/DPN/DPN92_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DPN92 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN92.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN92_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DPN/DPN98_train_amp_infer_python.txt b/test_tipc/configs/DPN/DPN98_train_amp_infer_python.txt new file mode 100644 index 
0000000000000000000000000000000000000000..50b9a7849bc632d403522760aead13fefa8f8a8b --- /dev/null +++ b/test_tipc/configs/DPN/DPN98_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DPN98 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN98_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DPN/DPN98_train_infer_python.txt b/test_tipc/configs/DPN/DPN98_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c91cd98f2a4de69a11a6a0c8a8c11a5011598579 --- /dev/null +++ b/test_tipc/configs/DPN/DPN98_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DPN98 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DPN/DPN98.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c 
ppcls/configs/ImageNet/DPN/DPN98.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DPN98_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DarkNet/DarkNet53_train_amp_infer_python.txt b/test_tipc/configs/DarkNet/DarkNet53_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..523314069c78593bba679812dee96541f7f58dd0 --- /dev/null +++ b/test_tipc/configs/DarkNet/DarkNet53_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DarkNet53 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DarkNet53_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DarkNet/DarkNet53_train_infer_python.txt b/test_tipc/configs/DarkNet/DarkNet53_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..44dd10505b56c81954db4cfe73b0b7d20f9d6f37 --- /dev/null +++ 
b/test_tipc/configs/DarkNet/DarkNet53_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DarkNet53 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DarkNet53_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,256,256]}] \ No newline at end of file diff --git a/test_tipc/configs/DeiT/DeiT_base_patch16_224_train_amp_infer_python.txt b/test_tipc/configs/DeiT/DeiT_base_patch16_224_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..735c918ca124d398b7972f7ea5fd037817051d01 --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_base_patch16_224_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DeiT_base_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## 
+===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DeiT/DeiT_base_patch16_224_train_infer_python.txt b/test_tipc/configs/DeiT/DeiT_base_patch16_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb44f6a7380239828bcdfa6bca91aab87af2c0a3 --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_base_patch16_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DeiT_base_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff 
--git a/test_tipc/configs/DeiT/DeiT_base_patch16_384_train_amp_infer_python.txt b/test_tipc/configs/DeiT/DeiT_base_patch16_384_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..821d97de81708a321ce07733b55745e40cbd6755 --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_base_patch16_384_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DeiT_base_patch16_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DeiT/DeiT_base_patch16_384_train_infer_python.txt b/test_tipc/configs/DeiT/DeiT_base_patch16_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b6091739d7530fcb86d8d8b7acc0c2e781515c47 --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_base_patch16_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DeiT_base_patch16_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o 
DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_base_patch16_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/configs/DeiT/DeiT_small_patch16_224_train_amp_infer_python.txt b/test_tipc/configs/DeiT/DeiT_small_patch16_224_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..04bafcac45a3c3231baec6ebe406faf92321d66d --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_small_patch16_224_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DeiT_small_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c 
configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DeiT/DeiT_small_patch16_224_train_infer_python.txt b/test_tipc/configs/DeiT/DeiT_small_patch16_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d6e4b3a277e6178f6b788730a00379cc1d86d4f2 --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_small_patch16_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DeiT_small_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_small_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DeiT/DeiT_tiny_patch16_224_train_amp_infer_python.txt b/test_tipc/configs/DeiT/DeiT_tiny_patch16_224_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..dad76ab80912f8248b0295735fcd84fed8f01a7d --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_tiny_patch16_224_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DeiT_tiny_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o 
DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DeiT/DeiT_tiny_patch16_224_train_infer_python.txt b/test_tipc/configs/DeiT/DeiT_tiny_patch16_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b0d4efd1c4d1fdf45216b9092f1c16b117f55a43 --- /dev/null +++ b/test_tipc/configs/DeiT/DeiT_tiny_patch16_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DeiT_tiny_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DeiT_tiny_patch16_224_pretrained.pdparams 
+infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DenseNet/DenseNet121_train_amp_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet121_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..557791cff6b26a81d251e8011350e689ecfe484b --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet121_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DenseNet121 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DenseNet/DenseNet121_train_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet121_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1532be0a281bf7dd57bab92c406b9ff76516df1 --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet121_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DenseNet121 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet121.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DenseNet/DenseNet161_train_amp_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet161_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8bd9feaa9844cc2291cfbb428c6100738805a9f --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet161_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DenseNet161 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml +quant_export:null 
+fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DenseNet/DenseNet161_train_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet161_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..34e744efd7df47ab775ecd2941b9f78cf5c56e42 --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet161_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DenseNet161 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet161.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DenseNet/DenseNet169_train_amp_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet169_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..177993bd3c27d325ec925e13c27e1d8aba523149 --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet169_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DenseNet169 +python:python3.7 
+gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DenseNet/DenseNet169_train_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet169_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..85e996fbfda4b3c86084475c43f2251d27906e73 --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet169_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DenseNet169 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet169.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DenseNet/DenseNet201_train_amp_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet201_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d85e34ee9120cb870574f14e54b682711b78ca8 --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet201_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DenseNet201 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DenseNet/DenseNet201_train_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet201_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..623d3f079c0038dbfaa856510903f206a0849f1b --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet201_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DenseNet201 +python:python3.7 
+gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet201.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/DenseNet/DenseNet264_train_amp_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet264_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f24a6e7706f6053702db2835e9984a4a60ef247f --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet264_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:DenseNet264 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c
ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/DenseNet/DenseNet264_train_infer_python.txt b/test_tipc/configs/DenseNet/DenseNet264_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..561348f693d50f0aa23d6c687916e2d5aca943fe --- /dev/null +++ b/test_tipc/configs/DenseNet/DenseNet264_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DenseNet264 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DenseNet/DenseNet264.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Distillation/resnet34_distill_resnet18_dkd_train_amp_infer_python.txt b/test_tipc/configs/Distillation/resnet34_distill_resnet18_dkd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..64c2383f0fd9f3725b79e5fa4e20d4b6d7e141c9 --- /dev/null +++ b/test_tipc/configs/Distillation/resnet34_distill_resnet18_dkd_train_amp_infer_python.txt @@
-0,0 +1,54 @@ +===========================train_params=========================== +model_name:DistillationModel +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.lr.learning_rate=0.02 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/Distillation/resnet34_distill_resnet18_dkd_train_infer_python.txt b/test_tipc/configs/Distillation/resnet34_distill_resnet18_dkd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..081146d98ea52ea8a8c0e3fbdc7d93cd2b700137 --- /dev/null +++ b/test_tipc/configs/Distillation/resnet34_distill_resnet18_dkd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:DistillationModel +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Optimizer.lr.learning_rate=0.02 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +##
+===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Distillation/resnet34_distill_resnet18_dkd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/ESNet/ESNet_x0_25_train_amp_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x0_25_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae8ac4a664a541e653484771c3b9fe45c0316e0b --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x0_25_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ESNet_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o
Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ESNet/ESNet_x0_25_train_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x0_25_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1dbbe13aba69d1692fd27a35ff609ced2afe075b --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x0_25_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ESNet_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ESNet/ESNet_x0_5_train_amp_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x0_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f57de7ad5c595ff41cfec8cb4738bb9eb541830 --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x0_5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ESNet_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o
Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ESNet/ESNet_x0_5_train_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x0_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0347d06e2edf1e2b1d31b24258bd4d3f1bf4eaf8 --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x0_5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ESNet_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ESNet/ESNet_x0_75_train_amp_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x0_75_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..fd5a5cdb49a16805dc75932dfffed8b9c6d51d99 --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x0_75_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ESNet_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ESNet/ESNet_x0_75_train_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x0_75_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7dddf978cf8b806145d93b154833869543752f94 --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x0_75_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ESNet_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null
+null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ESNet/ESNet_x1_0_train_amp_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x1_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..040c6a6576dd5398ff70335359ff73ad141472fe --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x1_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ESNet_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True
+null:null +null:null diff --git a/test_tipc/configs/ESNet/ESNet_x1_0_train_infer_python.txt b/test_tipc/configs/ESNet/ESNet_x1_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4e958f885196303a8a43fbe78f6c1b2688b6890 --- /dev/null +++ b/test_tipc/configs/ESNet/ESNet_x1_0_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ESNet_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ESNet/ESNet_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ESNet_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB0_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3562d1c147823f52b0a6c52f30d3811db4dfae21 --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:EfficientNetB0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True
-o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/EfficientNet/EfficientNetB0_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..65b88ebf8d754a8ab3b31f7cd316da39a5da97ea --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB0_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:EfficientNetB0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null
+===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB1_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB1_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..8518e8b8cdc02f2c031538c76c700551c41e243e --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB1_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:EfficientNetB1 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB1_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=272 -o PreProcess.transform_ops.1.CropImage.size=240 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/EfficientNet/EfficientNetB1_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB1_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..54bae3aeb0e7b53cceb039b7cceb5478e1e059d9 --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB1_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:EfficientNetB1 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml -o
Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB1.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB1_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=272 -o PreProcess.transform_ops.1.CropImage.size=240 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,240,240]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB2_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB2_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..6a74b4d103dd9fc8576ba2e7441091da1bf89f3e --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB2_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:EfficientNetB2 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB2_pretrained.pdparams
+infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=260 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/EfficientNet/EfficientNetB2_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB2_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..128ad101a8bea6e6d8300ae6393ee33aa6ba8dc8 --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB2_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:EfficientNetB2 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB2.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB2_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=260 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,260,260]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB3_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB3_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..202fe8449805a56511281725f55eeb37307fc3bc --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB3_train_amp_infer_python.txt @@ -0,0 +1,52 @@
+===========================train_params=========================== +model_name:EfficientNetB3 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB3_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=332 -o PreProcess.transform_ops.1.CropImage.size=300 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/EfficientNet/EfficientNetB3_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB3_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..38e06d913bb64945fdfa1ff39bdcd9bc392298e0 --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB3_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:EfficientNetB3 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml +null:null +## +===========================infer_params========================== +-o
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB3.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB3_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=332 -o PreProcess.transform_ops.1.CropImage.size=300 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,300,300]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB4_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB4_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3f529f2ff1290dd1a3a1c752059f7752df83986 --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB4_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:EfficientNetB4 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB4_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=412 -o PreProcess.transform_ops.1.CropImage.size=380 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null
+-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/EfficientNet/EfficientNetB4_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB4_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..175d10d31acf6ddd2dca9e206fdc163cbd32837c --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB4_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:EfficientNetB4 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB4.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB4_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=412 -o PreProcess.transform_ops.1.CropImage.size=380 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,380,380]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB5_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..66512927b9e39948ed191f42d9d8fcfc37cc345e --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:EfficientNetB5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -o Global.seed=1234 -o
DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=488 -o PreProcess.transform_ops.1.CropImage.size=456 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/EfficientNet/EfficientNetB5_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..8c765f48be8ee77f18e39daf24665e1f199ccded --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:EfficientNetB5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=488 -o
PreProcess.transform_ops.1.CropImage.size=456 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,456,456]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB6_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB6_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff0daf28e432b91527a6425a1232d459ffae00fd --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB6_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:EfficientNetB6 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB6_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=560 -o PreProcess.transform_ops.1.CropImage.size=528 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/EfficientNet/EfficientNetB6_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB6_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f22679b22596116d5729298ce8f6b7c56baead73 --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB6_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:EfficientNetB6 +python:python3.7 +gpu_list:0|0,1 +-o 
Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB6.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB6_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=560 -o PreProcess.transform_ops.1.CropImage.size=528 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,528,528]}] \ No newline at end of file diff --git a/test_tipc/configs/EfficientNet/EfficientNetB7_train_amp_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB7_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..abbdf2c4e09935187fdf87eaf1623d0851e6fc81 --- /dev/null +++ b/test_tipc/configs/EfficientNet/EfficientNetB7_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:EfficientNetB7 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml +null:null +## 
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=632 -o PreProcess.transform_ops.1.CropImage.size=600
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/EfficientNet/EfficientNetB7_train_infer_python.txt b/test_tipc/configs/EfficientNet/EfficientNetB7_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..49ee48e40b7c2439086d8ad3d0bee3581be610ab
--- /dev/null
+++ b/test_tipc/configs/EfficientNet/EfficientNetB7_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:EfficientNetB7
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:2
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/EfficientNet/EfficientNetB7.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/EfficientNetB7_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=632 -o PreProcess.transform_ops.1.CropImage.size=600
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,600,600]}]
\ No newline at end of file
diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_custom_sampler.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_custom_sampler.txt
similarity index 100%
rename from test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_custom_sampler.txt
rename to test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_custom_sampler.txt
diff --git a/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b76d5d1b626106d4a1420a74148621eace5c09fd
--- /dev/null
+++ b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_rec.py -c configs/inference_rec.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.rec_inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/Aliproduct/demo_test/
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fdcc052e9bd53af443095586ab858af0049bd75b
--- /dev/null
+++ b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_rec.py -c configs/inference_rec.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.rec_inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/Aliproduct/demo_test/
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c433838faa65875fdb48679ae7fca47466a2f5c9
--- /dev/null
+++ b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_rec.py -c configs/inference_rec.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:6
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.rec_inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/Aliproduct/demo_test/
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_multicard_eval.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_multicard_eval.txt
similarity index 100%
rename from test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_multicard_eval.txt
rename to test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_multicard_eval.txt
diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_no_eval.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_no_eval.txt
similarity index 100%
rename from test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_no_eval.txt
rename to test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_no_eval.txt
diff --git a/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_pact_infer_python.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_pact_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f557b258f42d7a793825e5a237de960a8c2e1fd8
--- /dev/null
+++ b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_pact_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.004 -o Global.pretrained_model="pretrained_model/general_PPLCNet_x2_5_pretrained_v1.0"
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Slim.quant.name=pact
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Slim.quant.name=pact
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/pretrain/general_PPLCNet_x2_5_pretrained_v1.0.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_rec.py -c configs/inference_rec.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.rec_inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/Aliproduct/demo_test/
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_ptq_infer_python.txt b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5863d8c06a2647b2c52bee77fac02368023f52c
--- /dev/null
+++ b/test_tipc/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_ptq_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:GeneralRecognition_PPLCNet_x2_5
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml
+fpgm_export:null
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.save_inference_dir=./general_PPLCNet_x2_5_lite_v1.0_infer
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
+infer_model:./general_PPLCNet_x2_5_lite_v1.0_infer/
+infer_export:True
+infer_quant:False
+inference:python/predict_rec.py -c configs/inference_rec.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.rec_inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/Aliproduct/demo_test/
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/GhostNet/GhostNet_x0_5_train_amp_infer_python.txt b/test_tipc/configs/GhostNet/GhostNet_x0_5_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..22a418d8c066e76eb9d0c59b7b399d823dfb8e68
--- /dev/null
+++ b/test_tipc/configs/GhostNet/GhostNet_x0_5_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:GhostNet_x0_5
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x0_5_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/GhostNet/GhostNet_x0_5_train_infer_python.txt b/test_tipc/configs/GhostNet/GhostNet_x0_5_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..10ed44102759c32a5293b15c9070ba07112621b6
--- /dev/null
+++ b/test_tipc/configs/GhostNet/GhostNet_x0_5_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:GhostNet_x0_5
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x0_5.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x0_5_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/GhostNet/GhostNet_x1_0_train_amp_infer_python.txt b/test_tipc/configs/GhostNet/GhostNet_x1_0_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e537c5181481b8e254f4eda919c282d67c75f8f0
--- /dev/null
+++ b/test_tipc/configs/GhostNet/GhostNet_x1_0_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:GhostNet_x1_0
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/GhostNet/GhostNet_x1_0_train_infer_python.txt b/test_tipc/configs/GhostNet/GhostNet_x1_0_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..804eaaa5159323d011f1cd7c1877b561ba746651
--- /dev/null
+++ b/test_tipc/configs/GhostNet/GhostNet_x1_0_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:GhostNet_x1_0
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_0.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/GhostNet/GhostNet_x1_3_train_amp_infer_python.txt b/test_tipc/configs/GhostNet/GhostNet_x1_3_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..92c7071c77574214c471ecb86cc8787c3ab9280c
--- /dev/null
+++ b/test_tipc/configs/GhostNet/GhostNet_x1_3_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:GhostNet_x1_3
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/GhostNet/GhostNet_x1_3_train_infer_python.txt b/test_tipc/configs/GhostNet/GhostNet_x1_3_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..769761b16207bc190b335ac2ebf7cb60b46a1259
--- /dev/null
+++ b/test_tipc/configs/GhostNet/GhostNet_x1_3_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:GhostNet_x1_3
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/GhostNet/GhostNet_x1_3.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GhostNet_x1_3_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/HRNet/HRNet_W18_C_train_amp_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W18_C_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5c6d572cc811652f92030a1f4c2a47cb42c303a
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W18_C_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:HRNet_W18_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/HRNet/HRNet_W18_C_train_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W18_C_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..adda057a351ed80af464d5d08ff9c86f0753ed37
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W18_C_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:HRNet_W18_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W18_C_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/HRNet/HRNet_W30_C_train_amp_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W30_C_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..68bff780a562f5308938a54d55abf8f8abdf5fba
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W30_C_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:HRNet_W30_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W30_C_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/HRNet/HRNet_W30_C_train_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W30_C_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6bd6103855191e0ee2684fb0b5970279cb159d56
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W30_C_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:HRNet_W30_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W30_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W30_C_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/HRNet/HRNet_W32_C_train_amp_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W32_C_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3913f4c43b19db9ae8ed81967f4524d4c5316c0d
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W32_C_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:HRNet_W32_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W32_C_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/HRNet/HRNet_W32_C_train_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W32_C_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..17f3f7869dfb800572ab3605518bdcad959f591c
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W32_C_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:HRNet_W32_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W32_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W32_C_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/HRNet/HRNet_W40_C_train_amp_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W40_C_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b4f4fc45ec6f7797e185e5172b9c28167831a724
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W40_C_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:HRNet_W40_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W40_C_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/HRNet/HRNet_W40_C_train_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W40_C_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e86fcf6b5e88651c0c05d81ad22589a975b7c1d4
--- /dev/null
+++ b/test_tipc/configs/HRNet/HRNet_W40_C_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params=========================== +model_name:HRNet_W40_C +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W40_C.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W40_C_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/HRNet/HRNet_W44_C_train_amp_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W44_C_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..05c2e0d0d5a44ed4c4b72cce43e67d01303b7d8d --- /dev/null +++ b/test_tipc/configs/HRNet/HRNet_W44_C_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:HRNet_W44_C +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml +null:null +## +===========================infer_params========================== +-o 
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W44_C_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/HRNet/HRNet_W44_C_train_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W44_C_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..9abc3eef09499643dd2ab66733d93773d798a9aa --- /dev/null +++ b/test_tipc/configs/HRNet/HRNet_W44_C_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:HRNet_W44_C +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W44_C.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W44_C_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/HRNet/HRNet_W48_C_train_amp_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W48_C_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..cd8e05db12674e46d61cde372920d56262c9b64c --- /dev/null +++ 
b/test_tipc/configs/HRNet/HRNet_W48_C_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:HRNet_W48_C +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/HRNet/HRNet_W48_C_train_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W48_C_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c83d9a6e8b3adb5a091a4d72c23f22c004a44acb --- /dev/null +++ b/test_tipc/configs/HRNet/HRNet_W48_C_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:HRNet_W48_C +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: 
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W48_C.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W48_C_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:64|128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/HRNet/HRNet_W64_C_train_amp_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W64_C_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2784cc488f64425b857e43175beabc21d1a2209c --- /dev/null +++ b/test_tipc/configs/HRNet/HRNet_W64_C_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:HRNet_W64_C +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o 
Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/HRNet/HRNet_W64_C_train_infer_python.txt b/test_tipc/configs/HRNet/HRNet_W64_C_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb7b082574906433c947ca3b6ecc619076641c96 --- /dev/null +++ b/test_tipc/configs/HRNet/HRNet_W64_C_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:HRNet_W64_C +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W64_C.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/HRNet_W64_C_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/HarDNet/HarDNet39_ds_train_amp_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet39_ds_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae9c892c38dccde60e4fc6eb9cac24c14a422e4c --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet39_ds_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:HarDNet39_ds +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o 
AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet39_ds_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/HarDNet/HarDNet39_ds_train_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet39_ds_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f57b052cfa3726ac5716f521038b5a03867f4e6a --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet39_ds_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:HarDNet39_ds +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet39_ds.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet39_ds_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null 
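Throughout these TIPC configs, a value containing `|` enumerates the alternative settings that the test harness sweeps over (for example `gpu_list:0|0,1` or `-o Global.batch_size:1|16`). The Python sketch below only illustrates that expansion; the real consumer of these files is the shell tooling under `test_tipc/`, and the helper name here is hypothetical.

```python
# Illustrative only: a minimal expansion of "key:value1|value2" TIPC
# config lines into the concrete override combinations that get tested.
# parse_tipc_line is a hypothetical helper, not part of test_tipc.
from itertools import product

def parse_tipc_line(line):
    # Split on the first ':' only; values may contain '=' signs.
    key, _, value = line.partition(":")
    return key, value.split("|")

sample = ["-o Global.use_gpu:True|False", "-o Global.batch_size:1|16"]
keys, choices = zip(*(parse_tipc_line(l) for l in sample))
for combo in product(*choices):
    # Each combination becomes one set of overrides for a test run.
    print(" ".join("{}={}".format(k, v) for k, v in zip(keys, combo)))
```

Under this reading, the two sample lines above yield four test runs: `use_gpu` in {True, False} crossed with `batch_size` in {1, 16}.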
+===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/HarDNet/HarDNet68_ds_train_amp_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet68_ds_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..811a037c223f4ce658dce1c03ca32d4793b3d14c --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet68_ds_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:HarDNet68_ds +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_ds_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/HarDNet/HarDNet68_ds_train_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet68_ds_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..098150b779eefdb9ba8bb3ff8badf40766618111 --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet68_ds_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:HarDNet68_ds +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False 
-o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68_ds.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_ds_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/HarDNet/HarDNet68_train_amp_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet68_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..abc5ed5c449f190db51f4a217b49c59ff29f163f --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet68_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:HarDNet68 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o 
Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/HarDNet/HarDNet68_train_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet68_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7e01a15cc05a3fc5442ded814d7587d19760cfd7 --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet68_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:HarDNet68 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet68.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet68_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/HarDNet/HarDNet85_train_amp_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet85_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1bd92977606039a2d94f7dc98296246e6d41fcec --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet85_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:HarDNet85 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o 
AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/HarDNet/HarDNet85_train_infer_python.txt b/test_tipc/configs/HarDNet/HarDNet85_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..963469c374bec3af0000f1c4bf3b1033381ed415 --- /dev/null +++ b/test_tipc/configs/HarDNet/HarDNet85_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:HarDNet85 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HarDNet/HarDNet85.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/HarDNet85_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null 
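For each `infer_params` block, the harness runs the script named on the `inference:` line from the `deploy/` directory (hence the `../` relative paths) and turns each swept value into a `-o key=value` override. Below is a hedged sketch of one resulting CPU-side command; the assembly code is an assumption for illustration, not the actual test_tipc implementation.

```python
# Hedged sketch: one concrete predict_cls.py invocation that an
# infer_params block like the one above could produce (CPU branch).
# The dict and the string assembly here are illustrative assumptions.
overrides = {
    "Global.use_gpu": "False",
    "Global.enable_mkldnn": "False",
    "Global.cpu_num_threads": "1",
    "Global.batch_size": "1",
    "Global.inference_model_dir": "../inference",
    "Global.infer_imgs": "../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG",
}
cmd = "python3.7 python/predict_cls.py -c configs/inference_cls.yaml " + " ".join(
    "-o {}={}".format(k, v) for k, v in overrides.items()
)
print(cmd)  # intended to be run from the deploy/ directory
```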
+===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Inception/GoogLeNet_train_amp_infer_python.txt b/test_tipc/configs/Inception/GoogLeNet_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b436eb15b85b2bb5d9c08621c0b214450bab838 --- /dev/null +++ b/test_tipc/configs/Inception/GoogLeNet_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:GoogLeNet +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.lr.learning_rate=0.0001 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Inception/GoogLeNet_train_infer_python.txt b/test_tipc/configs/Inception/GoogLeNet_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0479d983608aba0991ad62ff80538fbdfa991176 --- /dev/null +++ b/test_tipc/configs/Inception/GoogLeNet_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:GoogLeNet +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o 
Optimizer.lr.learning_rate=0.0001 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/GoogLeNet.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Inception/InceptionV3_train_amp_infer_python.txt b/test_tipc/configs/Inception/InceptionV3_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3aa8d82319fdf2df7d13a0488355dc703c67b35a --- /dev/null +++ b/test_tipc/configs/Inception/InceptionV3_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:InceptionV3 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV3_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o 
Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Inception/InceptionV3_train_infer_python.txt b/test_tipc/configs/Inception/InceptionV3_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef4ce7f09f0d5f0c090ef0d4d39e72e490de9952 --- /dev/null +++ b/test_tipc/configs/Inception/InceptionV3_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:InceptionV3 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV3.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV3_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/configs/Inception/InceptionV4_train_amp_infer_python.txt b/test_tipc/configs/Inception/InceptionV4_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..058da3dd226ee92a0687a29c76de8d92e1fdd721 --- /dev/null +++ b/test_tipc/configs/Inception/InceptionV4_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:InceptionV4 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest 
+train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Inception/InceptionV4_train_infer_python.txt b/test_tipc/configs/Inception/InceptionV4_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c6018e22b529b8cfd52582ef72bcc8739f1ea72 --- /dev/null +++ b/test_tipc/configs/Inception/InceptionV4_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:InceptionV4 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Inception/InceptionV4.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV4_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False 
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/configs/LeViT/LeViT_128S_train_amp_infer_python.txt b/test_tipc/configs/LeViT/LeViT_128S_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..78120bc8e00b2ecfe7626662a236f3826bdd5cb4 --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_128S_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:LeViT_128S +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128S_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/LeViT/LeViT_128S_train_infer_python.txt b/test_tipc/configs/LeViT/LeViT_128S_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..682f806c8e1d1bf7503faa6ff2818b7f7bf1a57d --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_128S_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:LeViT_128S +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128S_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/LeViT/LeViT_128_train_amp_infer_python.txt b/test_tipc/configs/LeViT/LeViT_128_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..34ff908a633f8d4fad0d9f59aa1fdd7289f521f7 --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_128_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:LeViT_128 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null 
+export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/LeViT/LeViT_128_train_infer_python.txt b/test_tipc/configs/LeViT/LeViT_128_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..05dc2c62f48c584cd3c99a6a2ff8254ba454f89b --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_128_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:LeViT_128 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/LeViT/LeViT_192_train_amp_infer_python.txt b/test_tipc/configs/LeViT/LeViT_192_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c4e6feea186ca22e29b9a410743c43df4de63b4 --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_192_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:LeViT_192 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_192_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/LeViT/LeViT_192_train_infer_python.txt b/test_tipc/configs/LeViT/LeViT_192_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b05156d40f0b20b33e70e4d78b82b36d53740c9b --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_192_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:LeViT_192 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_192.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_192_pretrained.pdparams +infer_model:../inference/ +infer_export:True 
+infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/LeViT/LeViT_256_train_amp_infer_python.txt b/test_tipc/configs/LeViT/LeViT_256_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..65d718e2ae26b15ec802c9f624ed8867f2be15a6 --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_256_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:LeViT_256 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_256_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/LeViT/LeViT_256_train_infer_python.txt b/test_tipc/configs/LeViT/LeViT_256_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5236931f04adcb4af3340cab54410670311a5e19 --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_256_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:LeViT_256 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o 
Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_256.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_256_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/LeViT/LeViT_384_train_amp_infer_python.txt b/test_tipc/configs/LeViT/LeViT_384_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..14ea9bf91db668f1f3316a4484d27162433d5918 --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_384_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:LeViT_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_384_pretrained.pdparams +infer_model:../inference/ 
+infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/LeViT/LeViT_384_train_infer_python.txt b/test_tipc/configs/LeViT/LeViT_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb423513063857dda73000b196f04fb3f710f40a --- /dev/null +++ b/test_tipc/configs/LeViT/LeViT_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:LeViT_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MixNet/MixNet_L_train_amp_infer_python.txt b/test_tipc/configs/MixNet/MixNet_L_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..946f448c64aa473864aafa1b1db3e12f8677cd62 --- /dev/null +++ b/test_tipc/configs/MixNet/MixNet_L_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MixNet_L +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o 
Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MixNet/MixNet_L_train_infer_python.txt b/test_tipc/configs/MixNet/MixNet_L_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..66e9170f25a7301ecbbcf6404a6a3f155091fdfe --- /dev/null +++ b/test_tipc/configs/MixNet/MixNet_L_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MixNet_L +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_L.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_L_pretrained.pdparams +infer_model:../inference/ 
+infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MixNet/MixNet_M_train_amp_infer_python.txt b/test_tipc/configs/MixNet/MixNet_M_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2304e5dcbe9261162bcdce183ef2523cf6509a85 --- /dev/null +++ b/test_tipc/configs/MixNet/MixNet_M_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MixNet_M +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MixNet/MixNet_M_train_infer_python.txt b/test_tipc/configs/MixNet/MixNet_M_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..4cc7b49e50e91ca5b23b1b423fac815cf8b57685 --- /dev/null +++ b/test_tipc/configs/MixNet/MixNet_M_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MixNet_M +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o 
DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_M.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_M_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MixNet/MixNet_S_train_amp_infer_python.txt b/test_tipc/configs/MixNet/MixNet_S_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..94fa68ab2f8bf433656bdaecd924541b24266202 --- /dev/null +++ b/test_tipc/configs/MixNet/MixNet_S_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MixNet_S +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_S_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MixNet/MixNet_S_train_infer_python.txt b/test_tipc/configs/MixNet/MixNet_S_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..158f012086e8997311d9ff34165df151be826e6e --- /dev/null +++ b/test_tipc/configs/MixNet/MixNet_S_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MixNet_S +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MixNet/MixNet_S.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MixNet_S_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV1/MobileNetV1_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..78401c81858400c8107db9fc0567a0fcf5aa18de --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV1 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV1/MobileNetV1_train_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3353451fca1667de83a84b4b9137763caa1a3e81 --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileNetV1 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:64|128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV1/MobileNetV1_x0_25_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_25_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..02b8e8a1dc1c7f69ab0a57da62013a0083fd51fd --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_25_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV1_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git 
a/test_tipc/configs/MobileNetV1/MobileNetV1_x0_25_train_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_25_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0ad8d79eb654a349d3eee01c9819ec086e7a3253 --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_25_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV1_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV1/MobileNetV1_x0_5_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d1e25d31fccaaf6edd40b5a07965fbf4450ab36d --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV1_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o 
DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV1/MobileNetV1_x0_5_train_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f2f01610ea08e6c19443a07a585fb1dc839ae5f5 --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV1_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o 
Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV1/MobileNetV1_x0_75_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_75_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d518b311b9621b6f3b67c3b0b1b1cb46c0aaa2b1 --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_75_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV1_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV1/MobileNetV1_x0_75_train_infer_python.txt b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_75_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..643eaf800094d44291f7edc946bcb6781d437b94 --- /dev/null +++ b/test_tipc/configs/MobileNetV1/MobileNetV1_x0_75_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV1_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train 
+norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e60caec71107e4a174e9fb2c2d5a62f595c5df61 --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV2 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams 
+infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_train_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ff01e8aaa8a371f50486afffacbaac083a567be --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileNetV2 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:64|128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x0_25_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_25_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..53e4e37dce425aa76b76c011e07c55a471def269 
--- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_25_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV2_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x0_25_train_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_25_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..202e33996a55e096355a43f1911c224a23501c6f --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_25_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV2_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml +null:null +## 
+===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x0_5_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2373929e35fdf9426fd95ad6660e0aa9341bdbaf --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV2_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git 
a/test_tipc/configs/MobileNetV2/MobileNetV2_x0_5_train_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..38419a97f0ae73cb5676139c9b7a33b5dca64b42 --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV2_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x0_75_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_75_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f35a9554f899a13487bff08b25a9b0d6645ba29 --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_75_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV2_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o 
AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x0_75_train_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_75_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2ccfad745b1994a919602781f03107552da6e242 --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x0_75_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV2_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o 
Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x1_5_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x1_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..82739e8e6dd1496f17afca834e7beeaa18b9b6a0 --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x1_5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV2_x1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x1_5_train_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x1_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d67569b4611ad8664b1cc2724ecbac9c9492d08d --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x1_5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV2_x1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml -o Global.seed=1234 -o 
DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x2_0_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x2_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..66a074ddb15f7995a509d0316e3d46c753d7428f --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x2_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV2_x2_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o 
Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV2/MobileNetV2_x2_0_train_infer_python.txt b/test_tipc/configs/MobileNetV2/MobileNetV2_x2_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c86f61c97a83024f3739aaba7f3883e02da0df87 --- /dev/null +++ b/test_tipc/configs/MobileNetV2/MobileNetV2_x2_0_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileNetV2_x2_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2_x2_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_35_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_35_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f1fc43b3253310ec80110df3596f37f053797364 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_35_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x0_35 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 
+-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_35_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_35_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c7b570a3d532b3a431ccf4ad276de396616bdf00 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_35_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x0_35 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_5_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..98baf3c51b6830e7a17195c5af3357bce2e16e4c --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_5_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_5_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1fe9714aa20e77eec532736c02cacb34cc159f2d --- /dev/null +++ 
b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_5_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_75_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_75_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f6bababdb07b46d73f655e6926501102bdfe96fd --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_75_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## 
+===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_75_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_75_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d2c14488f7e3d1d7607e6ecd90a198b01005135 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x0_75_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null 
+===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_FPGM_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_FPGM_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c460be0bec309203cc0331701247eacae3969dbc --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_FPGM_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0_FPGM +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml +fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..6393d49a2f4b6d0e7228e88f046e738230d0c542 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:MobileNetV3_large_x1_0_KL +cpp_infer_type:cls 
+cls_inference_model_dir:./MobileNetV3_large_x1_0_kl_quant_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_kl_quant_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..05efbacc76cbfd69279d658350abca9cfc607bd5 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:MobileNetV3_large_x1_0_KL +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_serving/ +--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..32b506afd594b486d6fe759ee2d1556f9b049a8c --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:MobileNetV3_large_x1_0_KL +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_serving/ +--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..73392cf3bedb5bf0f8b005d59d0e2862564d10cd --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 
+1,18 @@ +===========================cpp_infer_params=========================== +model_name:MobileNetV3_large_x1_0_PACT +cpp_infer_type:cls +cls_inference_model_dir:./MobileNetV3_large_x1_0_pact_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_pact_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..53e1b2cad462bfed06a531086ca520dc2399a764 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:MobileNetV3_large_x1_0_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_serving/ +--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..f4f6ce22a04468528956f283d8496cab27dfe474 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:MobileNetV3_large_x1_0_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_serving/ +--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_pact_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..54af86fc31128b9c2a24cd02fa2ffa403a2c720c --- /dev/null +++ 
b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_PACT_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0_PACT +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml +fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb292d2645735fab42f0f81d0a605f6d3b68ffed --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:MobileNetV3_large_x1_0 +cpp_infer_type:cls +cls_inference_model_dir:./MobileNetV3_large_x1_0_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False 
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..0212d0c454032bdcf292ac7de78b02c3072f510f --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/MobileNetV3_large_x1_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/MobileNetV3_large_x1_0_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/MobileNetV3_large_x1_0_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a3b777ecccc35bedd56ab7f19882ad8d1cd9b3d --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_serving/ +--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..344983ad22e524d59089f3e12c496ce4356c2997 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/MobileNetV3_large_x1_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/MobileNetV3_large_x1_0_serving/ +--serving_client:./deploy/paddleserving/MobileNetV3_large_x1_0_client/ +serving_dir:./deploy/paddleserving 
+web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt similarity index 100% rename from test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt rename to test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..87c5087581dd66fa44df1834303e89d6e27a5f1c --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml +fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./inference +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1f90b986281a1da9090f5f0170f1c5d9886e7fbb --- /dev/null +++ 
b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:256|640 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt similarity index 100% rename from test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt rename to test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..cac730739540e845107a9c7f579cf3252e3d5f41 --- /dev/null +++ 
b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:256|640 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..4e93924dfd5a587efd54d0bdbb457a7a64e0b20e --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## 
+trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_pact_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5637a8453da256c5c6ac333a1c93de99e51bd40e --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_pact_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.01 -o Global.pretrained_model="pretrained_model/MobileNetV3_large_x1_0_pretrained" +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null +===========================train_benchmark_params========================== +batch_size:256|640 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..53628ef5f0bdf54af9feafe58abbe8563330324b --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +fpgm_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml +fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./MobileNetV3_large_x1_0_infer 
+export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +infer_model:./MobileNetV3_large_x1_0_infer/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:256|640 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_25_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_25_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c874477f76a11c46df79b65067df02e6c32be778 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_25_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null 
+-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_25_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_25_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..324f872715a3e16a531f7836b28d13fb9e25ac6b --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_25_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_35_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_35_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..01cf34916c90548750fd516d3bc50578804f39eb --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_35_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x0_35 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -o Global.seed=1234 -o 
DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_35_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_35_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..6e48831c6b6e0ddc78214c43dbcfc050abc7455d --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_35_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x0_35 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o 
Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_5_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..590b5fd2c14156c2143a387b5ddc3d134f03e52a --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_5_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_5_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..24c40a1ddc83665fcbc986938278f1d4e277000a --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_5_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o 
Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_75_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_75_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1dfbfc2d7d97a4e607e5d57785bae6dfab3593d4 --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_75_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c 
ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_75_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_75_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d99cabf8bae4b3ea1211da5d274adc140055fad --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x0_75_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_train_amp_infer_python.txt new file mode 100644 index 
0000000000000000000000000000000000000000..da2e44c3a7476089f4231025f62184b652e9a02e --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c07de3e7fb1b2570b1888595d3ff1ad16cb56a1b --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_0_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== 
+eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_25_train_amp_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_25_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5c1c2536d8f668427d01c11d5949182e33e9b74d --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_25_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x1_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o 
Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_25_train_infer_python.txt b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_25_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..318b1e51ccc69e669e2cf7755922a7460af5fbbd --- /dev/null +++ b/test_tipc/configs/MobileNetV3/MobileNetV3_small_x1_25_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:MobileNetV3_small_x1_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_small_x1_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/MobileViT/MobileViT_S_train_infer_python.txt b/test_tipc/configs/MobileViT/MobileViT_S_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0c54292e2cf7f95a261bc5eb6fcf7cd9d8f1ba26 --- /dev/null +++ b/test_tipc/configs/MobileViT/MobileViT_S_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileViT_S +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_S.yaml -o Global.seed=1234 -o 
DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_S.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_S.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,256,256]}] diff --git a/test_tipc/configs/MobileViT/MobileViT_XS_train_infer_python.txt b/test_tipc/configs/MobileViT/MobileViT_XS_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d3ea3c90a4da80d1be85606d945f8aca4eded157 --- /dev/null +++ b/test_tipc/configs/MobileViT/MobileViT_XS_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileViT_XS +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XS.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XS.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o 
Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XS.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,256,256]}] diff --git a/test_tipc/configs/MobileViT/MobileViT_XXS_train_infer_python.txt b/test_tipc/configs/MobileViT/MobileViT_XXS_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..944c90098ebd6bd8f9c00b4cd73dbbabd37f8069 --- /dev/null +++ b/test_tipc/configs/MobileViT/MobileViT_XXS_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:MobileViT_XXS +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XXS.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XXS.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViT/MobileViT_XXS.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=292 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] 
+-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,256,256]}] diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..3c6e7ae0db89cc78f8745f1e294ac1f93dedac60 --- /dev/null +++ b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5_KL +cpp_infer_type:cls +cls_inference_model_dir:./general_PPLCNet_x2_5_lite_v1.0_kl_quant_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_kl_quant_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..68a3e04510ced8a60bbfd4980673187e153873df --- /dev/null +++ b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5_KL +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/general_PPLCNet_x2_5_lite_v1.0_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_serving/ +--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3c036ff8a5cf7ceb5c7e8d755afd4e65f1a9c40 --- /dev/null +++ b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5_KL 
+python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/general_PPLCNet_x2_5_lite_v1.0_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_serving/ +--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e40702e0284bb628548d71b8234489729d8e305b --- /dev/null +++ b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5_PACT +cpp_infer_type:cls +cls_inference_model_dir:./general_PPLCNet_x2_5_lite_v1.0_pact_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_pact_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..6e0d91686ebbb876362b4b00c017f221e0d47d09 --- /dev/null +++ b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/general_PPLCNet_x2_5_lite_v1.0_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_serving/ +--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4280e44af04e42a6c6c162ae4f24e294c1aa625 --- /dev/null +++ 
b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/general_PPLCNet_x2_5_lite_v1.0_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/general_PPLCNet_x2_5_lite_v1.0_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_serving/ +--serving_client:./deploy/paddleserving/GeneralRecognition_PPLCNet_x2_5_pact_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..652203bcb5f9569cace42b82daca2421c33be9d4 --- /dev/null +++ b/test_tipc/configs/PP-ShiTu/PPShiTu_general_rec_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/general_PPLCNet_x2_5_lite_v1.0_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar +inference:./python/predict_rec.py +Global.use_onnx:True +Global.rec_inference_model_dir:./models/general_PPLCNet_x2_5_lite_v1.0_infer +Global.use_gpu:False +-c:configs/inference_rec.yaml \ No newline at end of file diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..c8727278e5838cb6cf6273e796dec5071cac4a1d --- /dev/null +++ b/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,19 @@ +===========================cpp_infer_params=========================== +model_name:PPShiTu +cpp_infer_type:shitu +feature_inference_model_dir:./general_PPLCNet_x2_5_lite_v1.0_infer/ +det_inference_model_dir:./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar +det_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar +infer_quant:False +inference_cmd:./deploy/cpp_shitu/build/pp_shitu -c inference_drink.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +data_dir:./dataset/drink_dataset_v1.0 +benchmark:True +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py +transform_index_cmd:python3.7 deploy/cpp_shitu/tools/transform_id_map.py -c inference_drink.yaml diff 
--git a/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5bf0e4c1da933b8b356e34356b10274245f42b9c
--- /dev/null
+++ b/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================serving_params===========================
+model_name:PPShiTu
+python:python3.7
+cls_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
+det_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./models/general_PPLCNet_x2_5_lite_v1.0_infer/
+--dirname:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./models/general_PPLCNet_x2_5_lite_v1.0_serving/
+--serving_client:./models/general_PPLCNet_x2_5_lite_v1.0_client/
+--serving_server:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/
+--serving_client:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/
+serving_dir:./paddleserving/recognition
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8a533e82856bfacb1f2db920f20f5f89383e0d27
--- /dev/null
+++ b/test_tipc/configs/PP-ShiTu/PPShiTu_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================serving_params===========================
+model_name:PPShiTu
+python:python3.7
+cls_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
+det_inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./models/general_PPLCNet_x2_5_lite_v1.0_infer/
+--dirname:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./models/general_PPLCNet_x2_5_lite_v1.0_serving/
+--serving_client:./models/general_PPLCNet_x2_5_lite_v1.0_client/
+--serving_server:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/
+--serving_client:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/
+serving_dir:./paddleserving/recognition
+web_service:recognition_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bfd24bb4106245d7b279d0e7c07ffbc39f28fe83
--- /dev/null
+++ b/test_tipc/configs/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,16 @@
+===========================paddle2onnx_params===========================
+model_name:PP-ShiTu_mainbody_det
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/picodet_lcnet_x2_5_640_mainbody_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./deploy/models/picodet_lcnet_x2_5_640_mainbody_infer/inference.onnx
+--opset_version:11
+--enable_onnx_checker:True
+inference_model_url:https://paddledet.bj.bcebos.com/models/picodet_lcnet_x2_5_640_mainbody_infer.tar
+inference:null
+Global.use_onnx:null
+Global.inference_model_dir:null
+Global.use_gpu:null
+-c:null
\ No newline at end of file
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1c962c76190dfd89871e956390cce5bbb7d47937
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPHGNet_small_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./PPHGNet_small_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6890b2fd9c33e8df199fc5b3c53f7c94a6144e45
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..786fac936cb51c6d75abc70928a1d25b7c54b730
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_kl_quant_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cdbf87b7fb698c3729439f15a476f045b69d632b
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPHGNet_small_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./PPHGNet_small_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ae8cf0920e089a4638f02ffec336ab5c99fc3339
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_pact_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7b770999367863f3057678704b2f671c81688a1c
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPHGNet_small_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_pact_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..78e342fc9c465bd9e83f9a9352a2f1fa04fee509
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPHGNet_small
+cpp_infer_type:cls
+cls_inference_model_dir:./PPHGNet_small_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0b65824c391f796aeb7fb4acf12f38113303b6d7
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,16 @@
+===========================paddle2onnx_params===========================
+model_name:PPHGNet_small
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/PPHGNet_small_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./deploy/models/PPHGNet_small_infer/inference.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
+inference:./python/predict_cls.py
+Global.use_onnx:True
+Global.inference_model_dir:./models/PPHGNet_small_infer
+Global.use_gpu:False
+-c:configs/inference_cls.yaml
\ No newline at end of file
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b576cb90e3b3fea3fd83d28943e443b8c39a099c
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b576cb90e3b3fea3fd83d28943e443b8c39a099c
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_small
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_small_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_small_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_small_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_train_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..204503e49b27f532c777a8bcb832e21a2d794e39
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f8722e03964343d31f9bd4daea803fcc34c849ce
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..310a6a77ca4b2cccee839d291075a62d1af2ca8d
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:6
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_train_pact_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_train_pact_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..460c9e5248cf1bf7d4b9d76690981f5f907dcfec
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_train_pact_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.01 -o Global.pretrained_model="pretrained_model/PPHGNet_small_pretrained" -o AMP=None
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Slim.quant.name=pact
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Slim.quant.name=pact
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_small_train_ptq_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_small_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ba76846bbb3439c4ce879e32d2fef59d571d99da
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_small_train_ptq_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_small
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.save_inference_dir=./PPHGNet_small_infer
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
+infer_model:./PPHGNet_small_infer/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..37d48ac17ec320fe54ceb70c2ed119581db2f016
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPHGNet_tiny
+cpp_infer_type:cls
+cls_inference_model_dir:./PPHGNet_tiny_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b31ff8c60af0c55950f20b0d9ba68ee058be15af
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,16 @@
+===========================paddle2onnx_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/PPHGNet_tiny_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./deploy/models/PPHGNet_tiny_infer/inference.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
+inference:./python/predict_cls.py
+Global.use_onnx:True
+Global.inference_model_dir:./models/PPHGNet_tiny_infer
+Global.use_gpu:False
+-c:configs/inference_cls.yaml
\ No newline at end of file
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b4f5e41e110277938bd1d38543dcd5b8d5561d3
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_tiny_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_tiny_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_tiny_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b4f5e41e110277938bd1d38543dcd5b8d5561d3
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPHGNet_tiny_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPHGNet_tiny_serving/
+--serving_client:./deploy/paddleserving/PPHGNet_tiny_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4bf0d64319e46c2b8a4f7e0b7db112ad0ae548a5
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=232
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3764315df2062e3ce82ca6a91cc8127e8c2867d1
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=232
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8d383b7b839a859d7bf6a5f74b88b69a20794b37
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:6
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_pact_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_pact_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2cb3f57524d43d362a0386941b965cd70f78cc8c
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_pact_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.01 -o Global.pretrained_model="pretrained_model/PPHGNet_tiny_pretrained" -o AMP=None
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Slim.quant.name=pact
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Slim.quant.name=pact
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPHGNet_tiny_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_ptq_infer_python.txt b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..117da350331019d8ef9b89909598e83d937f67af
--- /dev/null
+++ b/test_tipc/configs/PPHGNet/PPHGNet_tiny_train_ptq_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPHGNet_tiny
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_tiny.yaml -o Global.save_inference_dir=./PPHGNet_tiny_infer
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
+infer_model:./PPHGNet_tiny_infer/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dbd610ae941a79d7cdf484d6ae9a666bd8799d24
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPLCNet_x0_25
+cpp_infer_type:cls
+cls_inference_model_dir:./PPLCNet_x0_25_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..62dcb630b61880531fc4f2888740fc0ca4251e16
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,16 @@
+===========================paddle2onnx_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/PPLCNet_x0_25_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
+inference:./python/predict_cls.py
+Global.use_onnx:True
+Global.inference_model_dir:./models/PPLCNet_x0_25_infer
+Global.use_gpu:False
+-c:configs/inference_cls.yaml
\ No newline at end of file
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a36cb9ac1888b0019bde4299f1f4732cedd6a59f
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNet_x0_25_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNet_x0_25_serving/
+--serving_client:./deploy/paddleserving/PPLCNet_x0_25_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b1ca66b137b3eecf5759ae7a4af1b44b9f1ac5cc
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/PPLCNet_x0_25_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/PPLCNet_x0_25_serving/
+--serving_client:./deploy/paddleserving/PPLCNet_x0_25_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_25_lite_arm_cpu_cpp.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_lite_arm_cpu_cpp.txt
similarity index 100%
rename from test_tipc/config/PPLCNet/PPLCNet_x0_25_lite_arm_cpu_cpp.txt
rename to test_tipc/configs/PPLCNet/PPLCNet_x0_25_lite_arm_cpu_cpp.txt
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b5d91c06c06ac476cce150347f273c50f5903f8f
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..21efc54952bf161227d0a84ca5f24dfadc14b40f
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2ccca0fb854c135aebbeb1417501663315b5edb1
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:6
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_pact_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d0dddd4cd5604e30f95d6b080adb7f27f229738
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_pact_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNet_x0_25_pretrained"
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Slim.quant.name=pact
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Slim.quant.name=pact
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_25_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d287423fe0ea8fb47739dbf3c8c33b61f72a156d
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_25_train_ptq_infer_python.txt
@@ -0,0 +1,53 @@
+===========================train_params===========================
+model_name:PPLCNet_x0_25
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_25.yaml -o Global.save_inference_dir=./PPLCNet_x0_25_infer
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
+infer_model:./PPLCNet_x0_25_infer/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d20b2099cd13cd8919dffdbcbbfc87f1de6febfc
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:PPLCNet_x0_35
+cpp_infer_type:cls
+cls_inference_model_dir:./PPLCNet_x0_35_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..427d8cc725cc7fa3f72d4a28c712d1109bcdae88
--- /dev/null
+++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,16 @@
+===========================paddle2onnx_params===========================
+model_name:PPLCNet_x0_35
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/PPLCNet_x0_35_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNet_x0_35_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNet_x0_35_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..f5673d9b583f95e32f2df0f8317a08e980a5bcd4 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x0_35 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x0_35_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x0_35_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x0_35_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..f80a40ae603a553b27a63e15661a62946854dff7 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x0_35 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x0_35_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x0_35_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x0_35_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..dbd78bdc6ae67ac8bdabc2ff3cdb4fe5b5de48a0 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_35 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train 
+norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4acd0a0aa19b5392cf79e64316676e47ad31134 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_35 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..92d940bb146111ad053851ab4ccf9f0ba3bb11d3 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_35 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b103129579ff1286417d163cff1c62b29bec1c2f --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_35 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNet_x0_35_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_35_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..092a9028064ec8aca0fa9c5a4216610d49b669ac --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_35_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_35 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml +quant_export:null +fpgm_export:null +distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_35.yaml -o Global.save_inference_dir=./PPLCNet_x0_35_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar +infer_model:./PPLCNet_x0_35_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..9cb7b18735ccf1af81b58505695ec910aa87ebcc --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNet_x0_5 +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x0_5_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..c70ef6fc0d5a28d333cec103a533eee237bb8351 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/PPLCNet_x0_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNet_x0_5_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNet_x0_5_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..487d270e07bbfd7503cc4cbde094326c4430825b --- /dev/null +++
b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x0_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x0_5_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x0_5_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..3ac4971a5000b8b2d1b4c5c04a02e5e2d9aa2090 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x0_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x0_5_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x0_5_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py \ No newline at end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_5_lite_arm_cpu_cpp.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_lite_arm_cpu_cpp.txt similarity index 100% rename from test_tipc/config/PPLCNet/PPLCNet_x0_5_lite_arm_cpu_cpp.txt rename to test_tipc/configs/PPLCNet/PPLCNet_x0_5_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..4764b6bf6ca0df0a4885a92d1293ab763e9e574a --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +null:null +## 
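For readers skimming these TIPC files: each is a plain key:value list, split on the first ':' per line, with '##' lines separating sections and '===...===' banners naming them. A minimal illustrative parser, not part of this patch (the real test_tipc runners are bash scripts that pick values by line position):
from typing import List, Tuple
def parse_tipc_config(path: str) -> List[Tuple[str, str]]:
    """Split each line on the first ':' into (key, value), keeping order and duplicates."""
    entries: List[Tuple[str, str]] = []
    with open(path) as f:
        for raw in f:
            line = raw.rstrip("\n")
            if not line or line.startswith("=") or line == "##":
                continue  # skip section banners and '##' separators
            key, _, value = line.partition(":")
            entries.append((key, value))
    return entries
# e.g. parse_tipc_config(".../PPLCNet_x0_5_train_infer_python.txt")[0] -> ("model_name", "PPLCNet_x0_5")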
+===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..178c919d1f28e7f98f4d602fb104e7087b2dfa19 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end
of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..9965ba497e786e5e7f8f450dd109456e18b34fde --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2fbcfab4751d4545f0952070b28a403fd46f60a3 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08
-o Global.pretrained_model="pretrained_model/PPLCNet_x0_5_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..392d2a1671d64a0ffb8ffe10b6cb76be88096671 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_5_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_5 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_5.yaml -o Global.save_inference_dir=./PPLCNet_x0_5_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar +infer_model:./PPLCNet_x0_5_infer/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o 
Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..72d99a639d929975a21d34c1a07af9dc91e04f9d --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNet_x0_75 +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x0_75_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..124648502dbdfd7e4b10890977ab45d8e82e8d5d --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/PPLCNet_x0_75_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNet_x0_75_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNet_x0_75_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..51b2a8167a78f245c7c3b882e08897d338557881 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x0_75_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x0_75_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x0_75_client/ 
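The trans_model keys just above assemble into a Paddle Serving model-conversion call. A hedged sketch of the equivalent command, built only from the values listed here for PPLCNet_x0_75 (illustrative; the test harness itself composes this in bash):
import subprocess
# Sketch only: mirrors the trans_model/--dirname/... keys above.
convert_cmd = [
    "python3.7", "-m", "paddle_serving_client.convert",
    "--dirname", "./deploy/paddleserving/PPLCNet_x0_75_infer/",
    "--model_filename", "inference.pdmodel",
    "--params_filename", "inference.pdiparams",
    "--serving_server", "./deploy/paddleserving/PPLCNet_x0_75_serving/",
    "--serving_client", "./deploy/paddleserving/PPLCNet_x0_75_client/",
]
subprocess.run(convert_cmd, check=True)  # writes serving_server/ and serving_client/ dirs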
+serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..771817b8f1fc28c71e18b3eda89156334192cfdb --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x0_75_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x0_75_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x0_75_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/config/PPLCNet/PPLCNet_x0_75_lite_arm_cpu_cpp.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_lite_arm_cpu_cpp.txt similarity index 100% rename from test_tipc/config/PPLCNet/PPLCNet_x0_75_lite_arm_cpu_cpp.txt rename to test_tipc/configs/PPLCNet/PPLCNet_x0_75_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..8efc1299a953256968e69d1f5560b0e1a9bf52d8 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o
Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a349b5b098e6ff8b9395f3728cbb4d77dcf96848 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..fa5a62fa7189b646125a6f0815b4da674cf58880 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..fcd01b437be67b59939bde0ccd7f6231804d41bb --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNet_x0_75_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null
+kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x0_75_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5e732f0650e4302103e73e8999e693780c76edf7 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x0_75_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x0_75 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x0_75.yaml -o Global.save_inference_dir=./PPLCNet_x0_75_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_75_infer.tar +infer_model:./PPLCNet_x0_75_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index
0000000000000000000000000000000000000000..1489dff0bc7e5103f69df687083fc94e4c863ec1 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNet_x1_0_KL +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x1_0_kl_quant_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_kl_quant_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d923f9b502268027661b176f84998c00fe9147c0 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_0_KL +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e18074aaa7c9ba665a2da5ae1d304a19abd551d --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_0_KL +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_0_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..77c6c26d1791508f59e7aaa0da22fd7400ba9836 --- /dev/null +++ 
b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNet_x1_0_PACT +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x1_0_pact_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_pact_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e576feb7fab5da4cab826e2bce1845333b8c5b79 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_0_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_0_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_0_pact_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_0_pact_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..b13025cc526f74dac2ddc472087fac229e0c90e8 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_0_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNet_x1_0_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_0_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_0_pact_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_0_pact_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..872ba7acbf50e35b0e2db83582b6e70654dd7412 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ 
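Each cpp_infer config in this patch, like the one that follows, pairs a generate_yaml_cmd with an inference_cmd. A hedged Python sketch of how a runner might chain the two (commands copied verbatim from the config; the real orchestration lives in test_tipc's bash scripts, which may pass extra flags):
import subprocess
# Sketch: regenerate the inference yaml, then invoke the compiled C++ classifier.
subprocess.run(["python3.7", "test_tipc/generate_cpp_yaml.py"], check=True)                  # generate_yaml_cmd
subprocess.run(["./deploy/cpp/build/clas_system", "-c", "inference_cls.yaml"], check=True)   # inference_cmd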
+===========================cpp_infer_params=========================== +model_name:PPLCNet_x1_0 +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x1_0_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d994671b348e90af1ca55b7bd116ed4e53a364c9 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/PPLCNet_x1_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNet_x1_0_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNet_x1_0_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..39aac394d0e62568ce51547ad0b995e9bbad7851 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_0_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_0_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e5d7859ee9dd035a5b7f1f0dd45a2c7325dba223 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar +trans_model:-m 
paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_0_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_0_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_lite_arm_cpu_cpp.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_lite_arm_cpu_cpp.txt similarity index 100% rename from test_tipc/config/PPLCNet/PPLCNet_x1_0_lite_arm_cpu_cpp.txt rename to test_tipc/configs/PPLCNet/PPLCNet_x1_0_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5bbe58d082c854bdfd5345c472e13deb27579bfa --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..2fb6c17b0510071f77dbc6de7ddbe61b2e8178ca --- /dev/null +++
b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..26165519e27477a4b9cb7c4232b6b238837954ee --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## 
+===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1ce5af743e223a8b7efb434de4a1fbf786a42be0 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNet_x1_0_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end 
of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f38e567fea950a4366255848f9f90cc58c6bd687 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.save_inference_dir=./PPLCNet_x1_0_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar +infer_model:./PPLCNet_x1_0_infer/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..84ced77b48f9ec13a31f705eef71eced5f0f41d7 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNet_x1_5 +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x1_5_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 
test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..8f8646f871e247a0f30cefec7e75c6d4b2dc9c44 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/PPLCNet_x1_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNet_x1_5_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNet_x1_5_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..20cdbd0bbc34126f8d5f5592df911cbebd619387 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_5_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_5_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d32527d825be1887d63cd774e88b1a4adecde3f7 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x1_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x1_5_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x1_5_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_5_lite_arm_cpu_cpp.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_lite_arm_cpu_cpp.txt similarity index 100% rename from 
test_tipc/config/PPLCNet/PPLCNet_x1_5_lite_arm_cpu_cpp.txt rename to test_tipc/configs/PPLCNet/PPLCNet_x1_5_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..17ef3baaba0e532aeff386704e87a7022ae7f166 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4bcc4f0047d0b890284a7e02ab8c2db390e9ff9 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c
ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1384c5c226135c9598ae30465d05a6bdf78e994 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e946c9306c58acfb3fcb550ebdb3a370f8122047 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNet_x1_5_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..491e377a60febfe03ca22a312be66c7b61ce9ceb --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x1_5_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_5 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o
DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_5.yaml -o Global.save_inference_dir=./PPLCNet_x1_5_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_5_infer.tar +infer_model:./PPLCNet_x1_5_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d8353543acdfcb7962a9a1261dc883399fba94e8 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNet_x2_0 +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x2_0_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..19336133ab4e70fde77b7c35d8bf03fd2d30258c --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +2onnx: paddle2onnx
+--model_dir:./deploy/models/PPLCNet_x2_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNet_x2_0_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNet_x2_0_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..f2a25a3d05f2de962f8194d769f82aa00064aa29 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x2_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x2_0_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x2_0_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8309d5d173d9bd48be77b831c2e27650a612867 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x2_0_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x2_0_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x2_0_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_0_lite_arm_cpu_cpp.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_lite_arm_cpu_cpp.txt similarity index 100% rename from test_tipc/config/PPLCNet/PPLCNet_x2_0_lite_arm_cpu_cpp.txt rename to test_tipc/configs/PPLCNet/PPLCNet_x2_0_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f37d1e41c66e43932b9e817edc8306975388e9b2 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..c7c74f07b9c94801405dd16b367cc622e896b7e0 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +quant_export:null +fpgm_export:null
+distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..246db70d0ae82462cee6c0e68978d82ffe378be8 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..59d11849e0ad8ebd53280bcfc2afdeeac2ee3db2 --- /dev/null +++
b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNet_x2_0_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a720d4f2e7debd44cb9eaa1b0f0379af95da762 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_0_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +null:null +##
+===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_0.yaml -o Global.save_inference_dir=./PPLCNet_x2_0_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_0_infer.tar +infer_model:./PPLCNet_x2_0_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..0aca9e32ee3da4512060657c9c1b982efd18bb4d --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNet_x2_5 +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNet_x2_5_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..1e08ad41e51edd0b6b72bf4f4367b9ce6383f537 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/PPLCNet_x2_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNet_x2_5_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNet_x2_5_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8c340ea7aa9e643fb45dbb426ae6459e88dcd75 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x2_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x2_5_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x2_5_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..269d33acf53dd923182bd1f68263d9efd8f20e9c --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNet_x2_5_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNet_x2_5_serving/ +--serving_client:./deploy/paddleserving/PPLCNet_x2_5_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_5_lite_arm_cpu_cpp.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_lite_arm_cpu_cpp.txt similarity index 100% rename from test_tipc/config/PPLCNet/PPLCNet_x2_5_lite_arm_cpu_cpp.txt rename to test_tipc/configs/PPLCNet/PPLCNet_x2_5_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_amp_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d73a29fd4d330164332391bcc7f9b194d6f9d40c --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_amp_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 
+pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..06f089948e387291117a3ee050bf487792016295 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at
end of file diff --git a/test_tipc/config/PPLCNet/PPLCNet_x2_5_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt similarity index 100% rename from test_tipc/config/PPLCNet/PPLCNet_x2_5_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt rename to test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..8353dafd0c452043bc09010f5386a470f65d8608 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..09341926a8aec978e75cf574f69584ac855706f8 --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o
Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_pact_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..309aaeee9859de8ba49cf7c1e586030a785a10cf --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNet_x2_5_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Slim.quant.name=pact +fpgm_export:null
+distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNet_x2_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef91b5f3b921ebfa36b66ebdd7588fe99efed75a --- /dev/null +++ b/test_tipc/configs/PPLCNet/PPLCNet_x2_5_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x2_5 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x2_5.yaml -o Global.save_inference_dir=./PPLCNet_x2_5_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x2_5_infer.tar +infer_model:./PPLCNet_x2_5_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index
0000000000000000000000000000000000000000..1700fcd138a7d45f8fc9ee3f95bcf381620504c4 --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNetV2_base_KL +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNetV2_base_kl_quant_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_kl_quant_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d444589b1808c687533e83849f3b3aae8ff74e8b --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNetV2_base_KL +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNetV2_base_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNetV2_base_kl_quant_serving/ +--serving_client:./deploy/paddleserving/PPLCNetV2_base_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..b909c073802ce92000ecaee8c044824ba2a7618b --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNetV2_base_KL +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_kl_quant_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNetV2_base_kl_quant_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNetV2_base_kl_quant_serving/ +--serving_client:./deploy/paddleserving/PPLCNetV2_base_kl_quant_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 
0000000000000000000000000000000000000000..085a6c35ca6a5723f73687d8f33886f93702e22a --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNetV2_base_PACT +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNetV2_base_pact_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_pact_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e050fcbd450cf55b27036c1af08d35109610972 --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNetV2_base_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNetV2_base_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNetV2_base_pact_serving/ +--serving_client:./deploy/paddleserving/PPLCNetV2_base_pact_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..b0e34f912c40b86332786f6fc92917c9a7bc19d7 --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNetV2_base_PACT +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/PPLCNetV2_base_pact_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNetV2_base_pact_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNetV2_base_pact_serving/ +--serving_client:./deploy/paddleserving/PPLCNetV2_base_pact_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..d6e68233e78f2072e8540170abee2c5ceae6b1ef --- 
/dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:PPLCNetV2_base +cpp_infer_type:cls +cls_inference_model_dir:./PPLCNetV2_base_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..b5047248e98989c3de64f473a2ab7b64084bfe37 --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/PPLCNetV2_base_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/PPLCNetV2_base_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/PPLCNetV2_base_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c355a9dbd734b8c5eab317007cb2ceb310a1a66 --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNetV2_base_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNetV2_base_serving/ +--serving_client:./deploy/paddleserving/PPLCNetV2_base_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..402679134a1b0f94fe1d8a67634efcff2772231a --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ 
+===========================serving_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/PPLCNetV2_base_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/PPLCNetV2_base_serving/ +--serving_client:./deploy/paddleserving/PPLCNetV2_base_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_infer_python.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..4931593872beeb1bafa54551637d7f28d3700ece --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.first_bs:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.seed=1234 -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..455573998b811392bab4545e92631000d2680b8e --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,53 @@ 
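The serving_params blocks above (KL, PACT and FP32 variants alike) all reduce to one conversion step: the trans_model line plus the --flag:value lines form a single paddle_serving_client.convert call that splits the downloaded inference model into serving_server and serving_client bundles, after which either classification_web_service.py (Python pipeline) or test_cpp_serving_client.py (C++ pipeline) is launched from serving_dir. A sketch of the assembled command, assuming the harness joins the pairs in the order they appear; the values are copied verbatim from the PPLCNetV2_base block above.

    # Sketch: the conversion command encoded by the serving_params block.
    import subprocess

    subprocess.run(
        [
            "python3.7", "-m", "paddle_serving_client.convert",
            "--dirname", "./deploy/paddleserving/PPLCNetV2_base_infer/",
            "--model_filename", "inference.pdmodel",
            "--params_filename", "inference.pdiparams",
            "--serving_server", "./deploy/paddleserving/PPLCNetV2_base_serving/",
            "--serving_client", "./deploy/paddleserving/PPLCNetV2_base_client/",
        ],
        check=True,
    )

The parallel *_infer_cpp configs skip conversion entirely: generate_cpp_yaml.py rewrites their key:value pairs into the inference_cls.yaml consumed by the prebuilt ./deploy/cpp/build/clas_system binary.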
+===========================train_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.first_bs:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.seed=1234 -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..fd8717b7866592266ff6fa6b5e1f5d1af66b89af --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:null +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.seed=1234 -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +null:null +## +===========================infer_params========================== +-o 
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:6 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:False +null:null diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_pact_infer_python.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_pact_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f5f2ed00a645d8cd99ba379e7bc44f7dd183a31e --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_pact_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.first_bs:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.seed=1234 -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.08 -o Global.pretrained_model="pretrained_model/PPLCNetV2_base_pretrained" +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Slim.quant.name=pact +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Slim.quant.name=pact +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/PPLCNetV2_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_ptq_infer_python.txt b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_ptq_infer_python.txt new file mode 100644 index 
0000000000000000000000000000000000000000..d77d1ccc1f15e98c45070e868c3bc546fd18521e --- /dev/null +++ b/test_tipc/configs/PPLCNetV2/PPLCNetV2_base_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.first_bs:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.seed=1234 -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.save_inference_dir=./PPLCNetV2_base_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar +infer_model:./PPLCNetV2_base_infer/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PVTV2/PVT_V2_B0_train_infer_python.txt b/test_tipc/configs/PVTV2/PVT_V2_B0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..eb029ff5a8d094073d8ab51c713484484e458bb9 --- /dev/null +++ b/test_tipc/configs/PVTV2/PVT_V2_B0_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:PVT_V2_B0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml 
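Note the asymmetry between the PACT and PTQ variants above: PACT retrains with Slim.quant.name=pact starting from training weights, while the *_train_ptq_infer_python blocks never retrain at all. Their kl_quant entry points at deploy/slim/quant_post_static.py, and pretrained_model_url/infer_model reference the exported inference tarball (PPLCNetV2_base_infer.tar) rather than .pdparams weights, because post-training quantization only needs a frozen model plus calibration images. A sketch, under the assumption that the harness runs the kl_quant entry verbatim once the tarball has been fetched and unpacked:

    # Sketch: post-training (KL) quantization step from the PPLCNetV2_base
    # PTQ block, assuming ./PPLCNetV2_base_infer/ is already downloaded.
    import subprocess

    subprocess.run(
        "python3.7 deploy/slim/quant_post_static.py"
        " -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml"
        " -o Global.save_inference_dir=./PPLCNetV2_base_infer",
        shell=True, check=True,
    )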
+null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B0.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PVTV2/PVT_V2_B1_train_infer_python.txt b/test_tipc/configs/PVTV2/PVT_V2_B1_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c289563f11e84df7cd2829f209553d0aed6c68f7 --- /dev/null +++ b/test_tipc/configs/PVTV2/PVT_V2_B1_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:PVT_V2_B1 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B1.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null 
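The PVTV2 inference lines above differ from the PPLCNet ones by two PreProcess overrides, ResizeImage.resize_short=256 and CropImage.size=224: the standard short-side resize followed by a centre crop, which is also why random_infer_input below feeds the benchmark a random float32 tensor of shape [3,224,224]. An illustrative PIL version of the two ops (PaddleClas implements them in its own preprocess module, so this is a stand-in, not the actual code):

    # Illustrative only: what the two PreProcess overrides amount to.
    from PIL import Image

    def resize_short(img: Image.Image, target: int = 256) -> Image.Image:
        """Scale so the shorter side equals `target`, keeping aspect ratio."""
        w, h = img.size
        scale = target / min(w, h)
        return img.resize((round(w * scale), round(h * scale)), Image.BILINEAR)

    def center_crop(img: Image.Image, size: int = 224) -> Image.Image:
        """Cut a size x size window from the image centre."""
        w, h = img.size
        left, top = (w - size) // 2, (h - size) // 2
        return img.crop((left, top, left + size, top + size))

    img = Image.open("dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG")
    img = center_crop(resize_short(img))  # short side 256, then 224 x 224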
+===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/PVTV2/PVT_V2_B2_Linear_train_infer_python.txt b/test_tipc/configs/PVTV2/PVT_V2_B2_Linear_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..cc412d47cc56da877fd63775d322787e6878cd71 --- /dev/null +++ b/test_tipc/configs/PVTV2/PVT_V2_B2_Linear_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:PVT_V2_B2_Linear +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B2_Linear.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PVTV2/PVT_V2_B2_train_infer_python.txt b/test_tipc/configs/PVTV2/PVT_V2_B2_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d607cfb8acaa6168be0ece49a6c1506cdc800614 --- /dev/null +++ b/test_tipc/configs/PVTV2/PVT_V2_B2_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:PVT_V2_B2 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B2.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PVTV2/PVT_V2_B3_train_infer_python.txt b/test_tipc/configs/PVTV2/PVT_V2_B3_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..58585012d816314ce6ecb7edc50eb11f57e374a1 --- /dev/null +++ b/test_tipc/configs/PVTV2/PVT_V2_B3_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:PVT_V2_B3 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
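PVT_V2_B2_Linear is the only PVTV2 entry here that adds a train_benchmark_params section: batch_size:128 replaces the lite batch size of 8, --profiler_options restricts profiling to batches 10-20, and the flags line is a semicolon-separated list of Paddle FLAGS_* environment variables. One way a harness could export them before launching the benchmark run; the exact launch mechanics are an assumption:

    # Sketch: exporting the semicolon-separated FLAGS_* variables from the
    # train_benchmark_params block before a benchmark training run.
    import os
    import subprocess

    flags = ("FLAGS_eager_delete_tensor_gb=0.0;"
             "FLAGS_fraction_of_gpu_memory_to_use=0.98;"
             "FLAGS_conv_workspace_size_limit=4096")
    env = dict(os.environ)
    env.update(kv.split("=", 1) for kv in flags.split(";"))
    subprocess.run(
        ["python3.7", "tools/train.py",
         "-c", "ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml",
         "-o", "DataLoader.Train.sampler.batch_size=128"],
        env=env, check=True,
    )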
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B3.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PVTV2/PVT_V2_B4_train_infer_python.txt b/test_tipc/configs/PVTV2/PVT_V2_B4_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7dccac5fb2002704cf7c66c8fef763637beac8c1 --- /dev/null +++ b/test_tipc/configs/PVTV2/PVT_V2_B4_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:PVT_V2_B4 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B4.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/PVTV2/PVT_V2_B5_train_infer_python.txt b/test_tipc/configs/PVTV2/PVT_V2_B5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..4dc143d00ddf8b2516d019fa224c1b2704b60347 --- /dev/null +++ 
b/test_tipc/configs/PVTV2/PVT_V2_B5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:PVT_V2_B5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/PVT_V2_B5.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=256 -o PreProcess.transform_ops.1.CropImage.size=224 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/ReXNet/ReXNet_1_0_train_amp_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_1_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c349d563c80a93f34c431211520e9aa172c548c --- /dev/null +++ b/test_tipc/configs/ReXNet/ReXNet_1_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ReXNet_1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c 
ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ReXNet/ReXNet_1_0_train_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_1_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..63045f7d263cbd3d2bbfeee893795e927b953fb7 --- /dev/null +++ b/test_tipc/configs/ReXNet/ReXNet_1_0_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ReXNet_1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ReXNet/ReXNet_1_3_train_amp_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_1_3_train_amp_infer_python.txt new file mode 100644 index 
0000000000000000000000000000000000000000..afe48af41ef1b288e7dca550ba39ce01d561abb9 --- /dev/null +++ b/test_tipc/configs/ReXNet/ReXNet_1_3_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ReXNet_1_3 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_3_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ReXNet/ReXNet_1_3_train_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_1_3_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbfa9ceaeb6352edbc00239edebbc42dd19f7a56 --- /dev/null +++ b/test_tipc/configs/ReXNet/ReXNet_1_3_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ReXNet_1_3 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml +null:null +## +===========================infer_params========================== +-o 
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_3.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_3_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ReXNet/ReXNet_1_5_train_amp_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_1_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..32186517c1814b855dcb659a523758a745a1fe70 --- /dev/null +++ b/test_tipc/configs/ReXNet/ReXNet_1_5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ReXNet_1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ReXNet/ReXNet_1_5_train_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_1_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..8a10bad8b67e76a6654388f0d7750f48ca6f8127 --- 
/dev/null +++ b/test_tipc/configs/ReXNet/ReXNet_1_5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ReXNet_1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ReXNet/ReXNet_2_0_train_amp_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_2_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..194cda7ada3891efee36a222be109fd61bc8a79e --- /dev/null +++ b/test_tipc/configs/ReXNet/ReXNet_2_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ReXNet_2_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml +null:null +## 
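In the ReXNet *_train_amp_infer_python blocks above, the infer section sweeps alternatives with a | separator (enable_mkldnn:True|False, cpu_num_threads:1|6, batch_size:1|16, and so on) and sets benchmark:True, whereas the plain *_train_infer_python configs pin each axis to a single value and leave benchmark:False. Assuming | means "run once per listed value", the grid expands as sketched below; this is an illustration, not the TIPC shell driver itself.

    # Sketch: expanding the "a|b" axes of an amp infer block into the full
    # set of predict_cls.py invocations (64 combinations for six 2-way axes).
    from itertools import product

    axes = {
        "Global.use_gpu": "True|False",
        "Global.enable_mkldnn": "True|False",
        "Global.cpu_num_threads": "1|6",
        "Global.batch_size": "1|16",
        "Global.use_tensorrt": "True|False",
        "Global.use_fp16": "True|False",
    }
    values = [v.split("|") for v in axes.values()]
    for combo in product(*values):
        overrides = " ".join(f"-o {k}={v}" for k, v in zip(axes, combo))
        print("python3.7 python/predict_cls.py -c configs/inference_cls.yaml "
              + overrides)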
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ReXNet/ReXNet_2_0_train_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_2_0_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..34d0649be23f4cc453b513be98d0c94702d217b6
--- /dev/null
+++ b/test_tipc/configs/ReXNet/ReXNet_2_0_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ReXNet_2_0
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_2_0.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_2_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ReXNet/ReXNet_3_0_train_amp_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_3_0_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e0a2d325829ed930aa43f1874a8cb205cbf7ce62
--- /dev/null
+++ b/test_tipc/configs/ReXNet/ReXNet_3_0_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ReXNet_3_0
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ReXNet/ReXNet_3_0_train_infer_python.txt b/test_tipc/configs/ReXNet/ReXNet_3_0_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..862660e91e4150493c775befd22e1e156ee48296
--- /dev/null
+++ b/test_tipc/configs/ReXNet/ReXNet_3_0_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ReXNet_3_0
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ReXNet/ReXNet_3_0.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ReXNet_3_0_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/RedNet/RedNet101_train_amp_infer_python.txt b/test_tipc/configs/RedNet/RedNet101_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..97092296af0cb09e83fdfc6bf192d6f39b94a6de
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet101_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:RedNet101
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:2
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/RedNet/RedNet101_train_infer_python.txt b/test_tipc/configs/RedNet/RedNet101_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..908f822f6440078c3287584f5c60ab065d7ea269
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet101_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:RedNet101
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:2
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet101.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet101_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/RedNet/RedNet152_train_amp_infer_python.txt b/test_tipc/configs/RedNet/RedNet152_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aae976c3f53fbe43e286e28bb5bfedd0489c1b7c
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet152_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:RedNet152
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:2
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/RedNet/RedNet152_train_infer_python.txt b/test_tipc/configs/RedNet/RedNet152_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b56b3cdc12b001d1d75ca093162203eb1e7bd1c6
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet152_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:RedNet152
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:2
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet152.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet152_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/RedNet/RedNet26_train_amp_infer_python.txt b/test_tipc/configs/RedNet/RedNet26_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..52f2a82e3d8f92a601deae826e98ffd6c94b9fb6
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet26_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:RedNet26
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet26_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/RedNet/RedNet26_train_infer_python.txt b/test_tipc/configs/RedNet/RedNet26_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0d303cded7a1572fdc6233fd17a1710ae1de477c
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet26_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:RedNet26
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet26.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet26_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/RedNet/RedNet38_train_amp_infer_python.txt b/test_tipc/configs/RedNet/RedNet38_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fbe7a4279a17d165da0613343c6968ddd52918a4
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet38_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:RedNet38
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet38_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/RedNet/RedNet38_train_infer_python.txt b/test_tipc/configs/RedNet/RedNet38_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..94ab7e71d017ee73e2034a7b8037fb1437f7f7f9
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet38_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:RedNet38
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet38.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet38_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/RedNet/RedNet50_train_amp_infer_python.txt b/test_tipc/configs/RedNet/RedNet50_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e6c3886994c4fba7e20a7d5de6f4bf51eb893ea7
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet50_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:RedNet50
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet50_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/RedNet/RedNet50_train_infer_python.txt b/test_tipc/configs/RedNet/RedNet50_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..85f743fbd95b23fb2ee47492ba109e57a2ee27e7
--- /dev/null
+++ b/test_tipc/configs/RedNet/RedNet50_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:RedNet50
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/RedNet/RedNet50.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/RedNet50_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/Res2Net/Res2Net101_vd_26w_4s_train_amp_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net101_vd_26w_4s_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d84fededc0c1c56f570a947ab8f7186e31b6733d
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net101_vd_26w_4s_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:Res2Net101_vd_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net101_vd_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/Res2Net/Res2Net101_vd_26w_4s_train_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net101_vd_26w_4s_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9e34d43edd055570095a78fd72cebea36875eec2
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net101_vd_26w_4s_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:Res2Net101_vd_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net101_vd_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net101_vd_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/Res2Net/Res2Net200_vd_26w_4s_train_amp_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net200_vd_26w_4s_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0eedea3c5d0be6a6b1bf9dfcf58f154769a205df
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net200_vd_26w_4s_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:Res2Net200_vd_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/Res2Net/Res2Net200_vd_26w_4s_train_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net200_vd_26w_4s_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b656359e083a7fb0f647e027e2365c13303237e
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net200_vd_26w_4s_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:Res2Net200_vd_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net200_vd_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/Res2Net/Res2Net50_14w_8s_train_amp_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net50_14w_8s_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0dd1128ddb8a39271aa4aaa9cc0a5bb4266e754c
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net50_14w_8s_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:Res2Net50_14w_8s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_14w_8s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/Res2Net/Res2Net50_14w_8s_train_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net50_14w_8s_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d2951f84ba5e3a3b272381a2e6d869337f454755
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net50_14w_8s_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:Res2Net50_14w_8s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_14w_8s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_14w_8s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/Res2Net/Res2Net50_26w_4s_train_amp_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net50_26w_4s_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..db8eaeac6709d421ee923bd8da65295ad8f044df
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net50_26w_4s_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:Res2Net50_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/Res2Net/Res2Net50_26w_4s_train_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net50_26w_4s_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5430c97b61e83e52587d27b488979948db2d1c46
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net50_26w_4s_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:Res2Net50_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/Res2Net/Res2Net50_vd_26w_4s_train_amp_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net50_vd_26w_4s_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8ea5320763246e51a934b70e4c2ebb36a8c8c4c4
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net50_vd_26w_4s_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:Res2Net50_vd_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_vd_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/Res2Net/Res2Net50_vd_26w_4s_train_infer_python.txt b/test_tipc/configs/Res2Net/Res2Net50_vd_26w_4s_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6792698d78fb986244ab59475077bb66e3af934d
--- /dev/null
+++ b/test_tipc/configs/Res2Net/Res2Net50_vd_26w_4s_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:Res2Net50_vd_26w_4s
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Res2Net/Res2Net50_vd_26w_4s.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net50_vd_26w_4s_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeSt/ResNeSt50_fast_1s1x64d_train_amp_infer_python.txt b/test_tipc/configs/ResNeSt/ResNeSt50_fast_1s1x64d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..230fd565c31c82ea2b5acdead5d40ab46854c6d6
--- /dev/null
+++ b/test_tipc/configs/ResNeSt/ResNeSt50_fast_1s1x64d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeSt50_fast_1s1x64d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_fast_1s1x64d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeSt/ResNeSt50_fast_1s1x64d_train_infer_python.txt b/test_tipc/configs/ResNeSt/ResNeSt50_fast_1s1x64d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..427dff09071c7bd164f1811767b642616e3658b5
--- /dev/null
+++ b/test_tipc/configs/ResNeSt/ResNeSt50_fast_1s1x64d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeSt50_fast_1s1x64d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50_fast_1s1x64d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_fast_1s1x64d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
diff --git a/test_tipc/configs/ResNeSt/ResNeSt50_train_amp_infer_python.txt b/test_tipc/configs/ResNeSt/ResNeSt50_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..add120cf64fa158817f139072fafe0cfa8d2bef0
--- /dev/null
+++ b/test_tipc/configs/ResNeSt/ResNeSt50_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeSt50
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeSt/ResNeSt50_train_infer_python.txt b/test_tipc/configs/ResNeSt/ResNeSt50_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1c7c85608d35b78c9ad4732aef245f79cfd24272
--- /dev/null
+++ b/test_tipc/configs/ResNeSt/ResNeSt50_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeSt50
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeSt/ResNeSt50.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeSt50_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_32x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..785ff5444132e3a821d1a3c7f45a21d88a147df6
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt101_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_32x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..25af7d0fe70366eef2c4eab6f5f9de7b321c1dbc
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt101_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_64x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_64x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ddf7b281b937ce6308dd12d5a37629fed1fdb353
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_64x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt101_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_64x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_64x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ca6c83ca9fb79a195f23e0a550a28bc33291e3c9
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_64x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt101_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_vd_32x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_vd_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..476527eb4a8880f5561af21ac1f2ac89a3fe5c33
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_vd_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt101_vd_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_vd_32x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_vd_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aa36f0230f9eb9f1b58e41ffe4ea0f23af8e88d7
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_vd_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt101_vd_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
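Every `-o key.subkey=value` fragment in the `norm_train`/`amp_train` commands above is an override applied on top of the YAML config: the dotted path walks the nested config tree and replaces the leaf value. A standalone illustration of the idea (this is a sketch, not the actual ppcls config loader):

```python
# Sketch of dotted-path config overrides, as in "-o Global.seed=1234".
def apply_override(cfg, override):
    path, _, raw = override.partition("=")
    keys = path.split(".")
    node = cfg
    for k in keys[:-1]:
        node = node.setdefault(k, {})
    # Crude literal parsing for the common cases seen in these configs.
    value = {"True": True, "False": False, "null": None}.get(raw, raw)
    try:
        value = int(raw)
    except ValueError:
        pass  # keep the bool/None/string value from above
    node[keys[-1]] = value
    return cfg

cfg = {"Global": {"seed": None},
       "DataLoader": {"Train": {"sampler": {"shuffle": True}}}}
for ov in ["Global.seed=1234", "DataLoader.Train.sampler.shuffle=False"]:
    apply_override(cfg, ov)
print(cfg)  # seed becomes 1234, shuffle becomes False
```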
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_vd_64x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_vd_64x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9599b092632de6b6dc0f2c8d793154de7e25f795
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_vd_64x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt101_vd_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
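The `amp_train` variants above add `AMP.scale_loss=128`, `AMP.use_dynamic_loss_scaling=True`, and `AMP.level=O2` to the training command. Roughly, these map onto Paddle's dynamic-graph AMP API (Paddle >= 2.1); the sketch below shows the shape of that loop with a stand-in model, not the actual ppcls training engine (and a full O2 setup would normally also decorate the model and optimizer with `paddle.amp.decorate`):

```python
# Illustrative mapping of the AMP.* overrides to paddle.amp, with a toy model.
import paddle

model = paddle.nn.Linear(10, 2)  # stand-in for the real backbone
opt = paddle.optimizer.Momentum(parameters=model.parameters())
# AMP.scale_loss=128 and AMP.use_dynamic_loss_scaling=True:
scaler = paddle.amp.GradScaler(init_loss_scaling=128,
                               use_dynamic_loss_scaling=True)
x = paddle.randn([8, 10])
with paddle.amp.auto_cast(level='O2'):  # AMP.level=O2 (pure fp16 mode)
    loss = model(x).mean()
scaled = scaler.scale(loss)  # scale the loss before backward
scaled.backward()
scaler.step(opt)             # unscale gradients and apply the update
scaler.update()              # adjust the loss scale dynamically
```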
diff --git a/test_tipc/configs/ResNeXt/ResNeXt101_vd_64x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt101_vd_64x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6639eeda89af3445768a5014049781ab9c0ba27e
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt101_vd_64x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt101_vd_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt101_vd_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_vd_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_32x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..785ff5444132e3a821d1a3c7f45a21d88a147df6
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt152_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_32x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..25af7d0fe70366eef2c4eab6f5f9de7b321c1dbc
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt152_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
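Some values above are mode-qualified, e.g. `Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120`: the harness picks the assignment whose name matches the requested TIPC mode (a two-epoch smoke run versus full 120-epoch training). A hypothetical selector, not the actual `test_tipc/test_train_inference_python.sh` logic:

```python
# Sketch: pick the per-mode value from "mode=value|mode=value" entries.
def value_for_mode(raw, mode):
    for assignment in raw.split("|"):
        name, _, val = assignment.partition("=")
        if name == mode:
            return val
    return raw  # plain values apply to every mode

raw = "lite_train_lite_infer=2|whole_train_whole_infer=120"
assert value_for_mode(raw, "lite_train_lite_infer") == "2"
assert value_for_mode(raw, "whole_train_whole_infer") == "120"
```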
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_64x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_64x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..91d1de89dd9374c8bac2ed6436967ddb237962ee
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_64x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt152_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
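For `gpu_list`, a comma-separated entry such as `0,1` is conventionally run through `paddle.distributed.launch`, while a single index runs the script directly. A sketch of how such commands could be assembled; the helper name is hypothetical and the real harness's exact flags may differ:

```python
# Sketch: turn a gpu_list alternative into a concrete training command.
def train_cmd(gpus, config):
    base = f"tools/train.py -c {config}"
    if "," in gpus:  # e.g. "0,1" -> distributed launch across two cards
        return f"python3.7 -m paddle.distributed.launch --gpus={gpus} {base}"
    return f"python3.7 {base} -o Global.device=gpu"

for gpus in "0|0,1".split("|"):
    print(train_cmd(gpus, "ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml"))
```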
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_64x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_64x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e378c2665bdfcacb130bfc6a8612765f267ff9de
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_64x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt152_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_vd_32x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_vd_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..65036bd7ce009760658a91b81a0369feec8e3541
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_vd_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt152_vd_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_vd_32x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_vd_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bf25de03496f9affe2e5d53a2e01c6ed22f61eb1
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_vd_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt152_vd_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_vd_64x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_vd_64x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bff639bc82eed88b9aa967114e93233a23cd9de0
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_vd_64x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt152_vd_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt152_vd_64x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt152_vd_64x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f881c4a0739b376d0891f4267f5e12011656e13d
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt152_vd_64x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt152_vd_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt152_vd_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt152_vd_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_32x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..243e52ee692a6f89a430a7b381e8f31304eac015
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt50_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt50_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_32x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..50a2884e621d0568649428ca3ff993ce2d960938
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt50_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt50_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_64x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_64x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..785be2abe4e26dcc18317e1498abb92ee106ea14
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt50_64x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNeXt50_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_64x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_64x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1a616cd4e608e1b6773591340aa7a5532e545f02
--- /dev/null
+++ b/test_tipc/configs/ResNeXt/ResNeXt50_64x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNeXt50_64x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_64x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_64x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
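The `random_infer_input:[{float32,[3,224,224]}]` entry in the `*_train_infer_python.txt` files describes the dtype and CHW shape of synthetic inputs used when benchmarking inference without real images. One way to materialize it, under the assumption that the spec grammar is exactly `{dtype,[dims]}`:

```python
# Sketch: build random benchmark tensors from an infer_benchmark_params spec.
import re
import numpy as np

def random_inputs(spec, batch_size=1):
    # Matches e.g. "{float32,[3,224,224]}" -> dtype plus per-sample shape.
    for dtype, dims in re.findall(r"\{(\w+),\[([\d,]+)\]\}", spec):
        shape = [batch_size] + [int(d) for d in dims.split(",")]
        yield np.random.rand(*shape).astype(dtype)

(x,) = random_inputs("[{float32,[3,224,224]}]")
print(x.shape, x.dtype)  # (1, 3, 224, 224) float32
```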
+===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_vd_32x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_vd_32x4d_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f1e63692ccde03df9b0ebb36d53557377fa40e79 --- /dev/null +++ b/test_tipc/configs/ResNeXt/ResNeXt50_vd_32x4d_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNeXt50_vd_32x4d +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_32x4d_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_vd_32x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_vd_32x4d_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2e3d4eefccd38a4bc2850fb12657eeb9bcf87b9c --- /dev/null +++ b/test_tipc/configs/ResNeXt/ResNeXt50_vd_32x4d_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNeXt50_vd_32x4d +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o 
DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_32x4d.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_32x4d_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_vd_64x4d_train_amp_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_vd_64x4d_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ed4b57e4d655090312ef34a4f04867f0bc2d4859 --- /dev/null +++ b/test_tipc/configs/ResNeXt/ResNeXt50_vd_64x4d_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNeXt50_vd_64x4d +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_64x4d_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o 
Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNeXt/ResNeXt50_vd_64x4d_train_infer_python.txt b/test_tipc/configs/ResNeXt/ResNeXt50_vd_64x4d_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..dfa8229ecb94c3fd306bd59e45429be9d07ecf12 --- /dev/null +++ b/test_tipc/configs/ResNeXt/ResNeXt50_vd_64x4d_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNeXt50_vd_64x4d +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNeXt/ResNeXt50_vd_64x4d.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt50_vd_64x4d_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet101_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet101_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d18b20d09b5fdb29648ed27b410ce291d53404bb --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet101_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet101 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c 
ppcls/configs/ImageNet/ResNet/ResNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet101_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet101_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7fb1980d3f9c37a006caa6eaec225649376d66ee --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet101_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNet101 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o 
Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet101_vd_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet101_vd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..342f9830be42157968c87d74695cf4040d168744 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet101_vd_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet101_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet101_vd_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet101_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..87f553159d87e51c8403e0f7566af80e4311e82b --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet101_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNet101_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c 
ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet101_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet101_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet152_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet152_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..74f20ff3ef7981fce96ca4a66c14064b244b3ea3 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet152_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet152 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c 
configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet152_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet152_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2bd9c756152c1ebd889002c1f7cf2488845c122f --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet152_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:ResNet152 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:32 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet152_vd_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet152_vd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..267735350dc4e1bd590ce4cad721188f1846b6f0 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet152_vd_train_amp_infer_python.txt @@ -0,0 +1,52 @@ 
+===========================train_params=========================== +model_name:ResNet152_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet152_vd_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet152_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..bd0f45b97aeba5ae1b83791d1097c1835496f61f --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet152_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNet152_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet152_vd.yaml 
+quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet152_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet18_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet18_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..a38c72c0c3bab0dc55a7927778a790554b9d97c6 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet18_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet18 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet18_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet18_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce5977cd829eeae3c1644397fd0206d57963db66 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet18_train_infer_python.txt @@ -0,0 +1,54 @@ 
+===========================train_params=========================== +model_name:ResNet18 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet18_vd_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet18_vd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d0adc2b7f7f5edf68dfd3e68704aedac90cc1a57 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet18_vd_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet18_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml +null:null +## +===========================infer_params========================== +-o 
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet18_vd_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet18_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..18138b9c6cef68b4104d79455fb8f4adffddea92 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet18_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNet18_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet18_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet18_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet200_vd_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet200_vd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f1b33fcee7764d7d24609bdc9c1eed7d0350e137 --- /dev/null +++ 
b/test_tipc/configs/ResNet/ResNet200_vd_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet200_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet200_vd_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet200_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..62c4aa6fa6d461845099402cd3adce79f84dd901 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet200_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNet200_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: 
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet200_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet200_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet34_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet34_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f35bb23fd69196a0266ff8a1bb91e8653e0e682b --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet34_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet34 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet34_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet34_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e29da50ebba22e89465ba46c54cbaf05d34ad00e --- /dev/null +++ 
b/test_tipc/configs/ResNet/ResNet34_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNet34 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet34_vd_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet34_vd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0253ab83b7bbd12a25a8cca582a18eb5db7b45e6 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet34_vd_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ResNet34_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml +null:null +## 
+===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ResNet/ResNet34_vd_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet34_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..26740a1877bda6d9bc88cc745b9c8bf2c336a9b7 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet34_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ResNet34_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet34_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet34_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 
0000000000000000000000000000000000000000..e9de97fdd1046be3eec2cb7805ce5ca6b6bbb5ff --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================cpp_infer_params=========================== +model_name:ResNet50 +cpp_infer_type:cls +cls_inference_model_dir:./ResNet50_infer/ +det_inference_model_dir: +cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar +det_inference_url: +infer_quant:False +inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml +use_gpu:True|False +enable_mkldnn:False +cpu_threads:1 +batch_size:1 +use_tensorrt:False +precision:fp32 +image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +benchmark:False +generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py diff --git a/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..784f1eb2c3cdd3c4f8f10775fceb9c741c79aeaf --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,16 @@ +===========================paddle2onnx_params=========================== +model_name:ResNet50 +python:python3.7 +2onnx: paddle2onnx +--model_dir:./deploy/models/ResNet50_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./deploy/models/ResNet50_infer/inference.onnx +--opset_version:10 +--enable_onnx_checker:True +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar +inference:./python/predict_cls.py +Global.use_onnx:True +Global.inference_model_dir:./models/ResNet50_infer +Global.use_gpu:False +-c:configs/inference_cls.yaml \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4344caf1f0327d53d64c3175034d5b4f41dff97 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:ResNet50 +python:python3.7 +inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/ResNet50_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/ResNet50_serving/ +--serving_client:./deploy/paddleserving/ResNet50_client/ +serving_dir:./deploy/paddleserving +web_service:null +--use_gpu:0|null +pipline:test_cpp_serving_client.py diff --git a/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..0f43feceac3f6200901d20540ae32fcaad92bc46 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,14 @@ +===========================serving_params=========================== +model_name:ResNet50 +python:python3.7 
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar +trans_model:-m paddle_serving_client.convert +--dirname:./deploy/paddleserving/ResNet50_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/paddleserving/ResNet50_serving/ +--serving_client:./deploy/paddleserving/ResNet50_client/ +serving_dir:./deploy/paddleserving +web_service:classification_web_service.py +--use_gpu:0|null +pipline:pipeline_http_client.py diff --git a/test_tipc/config/ResNet/ResNet50_lite_arm_cpu_cpp.txt b/test_tipc/configs/ResNet/ResNet50_lite_arm_cpu_cpp.txt similarity index 100% rename from test_tipc/config/ResNet/ResNet50_lite_arm_cpu_cpp.txt rename to test_tipc/configs/ResNet/ResNet50_lite_arm_cpu_cpp.txt diff --git a/test_tipc/configs/ResNet/ResNet50_train_ampfp16_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_train_ampfp16_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3c7592779c5f1a15279eaa1cb7bdd564a231904e --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_train_ampfp16_infer_python.txt @@ -0,0 +1,56 @@ +===========================train_params=========================== +model_name:ResNet50 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_amp_O1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O1 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128|256 +fp_items:ampfp16 +epoch:1 diff --git a/test_tipc/configs/ResNet/ResNet50_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2870be8321fb7cd5e136054fb34e05c4eac5ae0e --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_train_infer_python.txt @@ -0,0 
+1,60 @@ +===========================train_params=========================== +model_name:ResNet50 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet50_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..808c71c8b0dbdc8b75a75db14f15708fb27fc55c --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:ResNet50 +python:python3.7 +gpu_list:192.168.0.1,192.168.0.2;0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o 
DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ResNet/ResNet50_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..1438098f616db6ac34598a64202eaca092a04800 --- /dev/null +++ b/test_tipc/configs/ResNet/ResNet50_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,56 @@ +===========================train_params=========================== +model_name:ResNet50 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:6
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128|256
+fp_items:purefp16
+epoch:1
diff --git a/test_tipc/configs/ResNet/ResNet50_train_pact_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_train_pact_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..152918e07894cc5c3e925c4006eb535c2b0ac918
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_train_pact_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:ResNet50
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.01 -o Global.pretrained_model="pretrained_model/ResNet50_pretrained"
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Slim.quant.name=pact
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Slim.quant.name=pact
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNet/ResNet50_train_ptq_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0f38b64749198b602d31a32522ae0aaf48251300
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_train_ptq_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:ResNet50
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=200
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50.yaml -o Global.save_inference_dir=./ResNet50_infer
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_infer.tar
+infer_model:./ResNet50_infer/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_FPGM_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_vd_FPGM_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8bb612aee9c4ba1afa57f311a4d80016c041de0f
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_FPGM_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNet50_vd_FPGM
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
+fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..892b4859b1e3941c7b8ca235152f105d17d06729
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:ResNet50_vd_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./ResNet50_vd_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3e4b19b752e3f0f15fdb552a29035e055633102d
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_kl_quant_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..236b57b03643d1e5cab743abb80dcf8cf03dc8c1
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_kl_quant_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a7de8e40da12067278a67759a05c09794a654eaa
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:ResNet50_vd_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./ResNet50_vd_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8889726448f3c01e3c6449c223376c8f9fb5b3a6
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_pact_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4e50eff09796295c40b9e147a45b482f8c683d3d
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_pact_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_PACT_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_vd_PACT_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1811dc6910a96fd43c6614bc18fd005dda4478b2
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_PACT_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNet50_vd_PACT
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
+fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e4b8c1ddfa3502299ff35214f0a1d9c822f7a86b
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:ResNet50_vd
+cpp_infer_type:cls
+cls_inference_model_dir:./ResNet50_vd_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..82806311e7f2effec06a72dae8d6024e1d54f25e
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,16 @@
+===========================paddle2onnx_params===========================
+model_name:ResNet50_vd
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/ResNet50_vd_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./deploy/models/ResNet50_vd_infer/inference.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
+inference:./python/predict_cls.py
+Global.use_onnx:True
+Global.inference_model_dir:./models/ResNet50_vd_infer/
+Global.use_gpu:False
+-c:configs/inference_cls.yaml
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2de324433d0a51e994044fdb215ce8ac621aed45
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d51f5d6ccd38e5e39c4b12b4999b987bb25ce951
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:ResNet50_vd
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/ResNet50_vd_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/ResNet50_vd_serving/
+--serving_client:./deploy/paddleserving/ResNet50_vd_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipline:pipeline_http_client.py
diff --git a/test_tipc/config/ResNet/ResNet50_vd_lite_arm_cpu_cpp.txt b/test_tipc/configs/ResNet/ResNet50_vd_lite_arm_cpu_cpp.txt
similarity index 100%
rename from test_tipc/config/ResNet/ResNet50_vd_lite_arm_cpu_cpp.txt
rename to test_tipc/configs/ResNet/ResNet50_vd_lite_arm_cpu_cpp.txt
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_train_amp_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..21ba8068fdf86fbc2be970a19082e41f72004230
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNet50_vd
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
+fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./inference
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7980b378e0883d54e13b8eef8547b6a6b2bb2278
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNet50_vd
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/config/ResNet/ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt
similarity index 100%
rename from test_tipc/config/ResNet/ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt
rename to test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..697c4894ba48223ecb6c5be40ae85b5ac034d851
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:ResNet50_vd
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a56c9598a332100a6d873f96f6c742d674b20a3b
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ResNet50_vd
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:6
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
diff --git a/test_tipc/config/ResNet/ResNet50_vd_train_linux_gpu_use_dali.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_use_dali.txt
similarity index 100%
rename from test_tipc/config/ResNet/ResNet50_vd_train_linux_gpu_use_dali.txt
rename to test_tipc/configs/ResNet/ResNet50_vd_train_linux_gpu_use_dali.txt
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_train_pact_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_pact_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5b4779dbf2331660da2dcf00bac119837e79e690
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_train_pact_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:ResNet50_vd
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=200
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.01 -o Global.pretrained_model="pretrained_model/ResNet50_vd_pretrained"
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Slim.quant.name=pact
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Slim.quant.name=pact
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/ResNet50_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/ResNet/ResNet50_vd_train_ptq_infer_python.txt b/test_tipc/configs/ResNet/ResNet50_vd_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6398beceee2a69f9be84eb7999c801a4fac5201a
--- /dev/null
+++ b/test_tipc/configs/ResNet/ResNet50_vd_train_ptq_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:ResNet50_vd
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=200
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+to_static_train:-o Global.to_static=True
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./ResNet50_vd_infer
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
+infer_model:./ResNet50_vd_infer/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:128
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SENet/SENet154_vd_train_amp_infer_python.txt b/test_tipc/configs/SENet/SENet154_vd_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..993514b564718da3f4733e481e043a1e4b3e8bce
--- /dev/null
+++ b/test_tipc/configs/SENet/SENet154_vd_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SENet154_vd
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SENet/SENet154_vd_train_infer_python.txt b/test_tipc/configs/SENet/SENet154_vd_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..56f709d3438a767d6bdb46a68cab69cecb351fae
--- /dev/null
+++ b/test_tipc/configs/SENet/SENet154_vd_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SENet154_vd
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SENet154_vd.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SENet154_vd_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SENet/SE_ResNeXt101_32x4d_train_amp_infer_python.txt b/test_tipc/configs/SENet/SE_ResNeXt101_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3b019043d7f8611b831de507f63db836e4b1ffbf
--- /dev/null
+++ b/test_tipc/configs/SENet/SE_ResNeXt101_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SE_ResNeXt101_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SENet/SE_ResNeXt101_32x4d_train_infer_python.txt b/test_tipc/configs/SENet/SE_ResNeXt101_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..feb04229f313f7f60a9a9a2db883c87823e1c01b
--- /dev/null
+++ b/test_tipc/configs/SENet/SE_ResNeXt101_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SE_ResNeXt101_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt101_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt101_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SENet/SE_ResNeXt50_32x4d_train_amp_infer_python.txt b/test_tipc/configs/SENet/SE_ResNeXt50_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5f04b1febb45c2ccc7c1cc174338c037c45547af
--- /dev/null
+++ b/test_tipc/configs/SENet/SE_ResNeXt50_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SE_ResNeXt50_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SENet/SE_ResNeXt50_32x4d_train_infer_python.txt b/test_tipc/configs/SENet/SE_ResNeXt50_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..38b7ae8879ed24d6202c9e716890ff9be8082c7a
--- /dev/null
+++ b/test_tipc/configs/SENet/SE_ResNeXt50_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SE_ResNeXt50_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SENet/SE_ResNeXt50_vd_32x4d_train_amp_infer_python.txt b/test_tipc/configs/SENet/SE_ResNeXt50_vd_32x4d_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0aeb092de314023ff7630e062a409e9606e574c6
--- /dev/null
+++ b/test_tipc/configs/SENet/SE_ResNeXt50_vd_32x4d_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SE_ResNeXt50_vd_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_vd_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SENet/SE_ResNeXt50_vd_32x4d_train_infer_python.txt b/test_tipc/configs/SENet/SE_ResNeXt50_vd_32x4d_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5e71729d33f603d022fd99ac18006c5f5237a4ea
--- /dev/null
+++ b/test_tipc/configs/SENet/SE_ResNeXt50_vd_32x4d_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SE_ResNeXt50_vd_32x4d
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNeXt50_vd_32x4d.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNeXt50_vd_32x4d_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SENet/SE_ResNet18_vd_train_amp_infer_python.txt b/test_tipc/configs/SENet/SE_ResNet18_vd_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ed2aae84984997261b0be4906acadc8f92fcbcaa
--- /dev/null
+++ b/test_tipc/configs/SENet/SE_ResNet18_vd_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SE_ResNet18_vd
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet18_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/SENet/SE_ResNet18_vd_train_infer_python.txt b/test_tipc/configs/SENet/SE_ResNet18_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..bd7f8526d5b76c55789b6565be4203e6db8eb160 --- /dev/null +++ b/test_tipc/configs/SENet/SE_ResNet18_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:SE_ResNet18_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet18_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet18_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/SENet/SE_ResNet34_vd_train_amp_infer_python.txt b/test_tipc/configs/SENet/SE_ResNet34_vd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1c0c3ffa06c55a41e20ca2d2ad41b8d0962bbf85 --- /dev/null +++ b/test_tipc/configs/SENet/SE_ResNet34_vd_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:SE_ResNet34_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu
+-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet34_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/SENet/SE_ResNet34_vd_train_infer_python.txt b/test_tipc/configs/SENet/SE_ResNet34_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d5766c7b44489cee332c171ee0c47f9fb1eb0949 --- /dev/null +++ b/test_tipc/configs/SENet/SE_ResNet34_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:SE_ResNet34_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet34_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet34_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/SENet/SE_ResNet50_vd_train_amp_infer_python.txt b/test_tipc/configs/SENet/SE_ResNet50_vd_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..3ce39a969fb3a3b133a72d7cc3f90231725abedf --- /dev/null +++ b/test_tipc/configs/SENet/SE_ResNet50_vd_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:SE_ResNet50_vd +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet50_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/SENet/SE_ResNet50_vd_train_infer_python.txt b/test_tipc/configs/SENet/SE_ResNet50_vd_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccea1da26c6560d2c543a931e60959c462057b21 --- /dev/null +++ b/test_tipc/configs/SENet/SE_ResNet50_vd_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:SE_ResNet50_vd
+python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SENet/SE_ResNet50_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SE_ResNet50_vd_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_swish_train_amp_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_swish_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f447fc0ee1746a435cfdeb06d79166b0dc9ea8ab --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_swish_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_swish +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o
Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_swish_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_swish_train_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_swish_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0c34a8e6c4ff0df297a8d574729022584124a997 --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_swish_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_swish +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_swish.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_swish_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_25_train_amp_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_25_train_amp_infer_python.txt new file mode 100644 index
0000000000000000000000000000000000000000..56919a43fb4ddf4479972c322dcb4cf26f39887a --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_25_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_25_train_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_25_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c471555dee270f8439b6fcd124049e2ea998a95b --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_25_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x0_25 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c
ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_25.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_25_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_33_train_amp_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_33_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..fbc01f9d9e4860cf52739cb79f2079afb6f2e07c --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_33_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x0_33 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_33_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null
+null:null diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_33_train_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_33_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d0f547a3dabdff73bc680f05ce6af55406827ec --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_33_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x0_33 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_33.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_33_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_5_train_amp_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..cea9fa8242c05d137743fe3adc3d0932dc760daa --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o
DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_5_train_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2e7c1e23c5d5808467fbe9a4c04f536d8737812e --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x0_5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x0_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x0_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o
Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_0_train_amp_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b453a93bab76e0b8ebfb6eae82518479fca19a6b --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_0_train_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7eebc33185fece9cb56366bcf0c8dd328b945ac4 --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_0_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c
ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:256|1536 +fp_items:fp32 +epoch:2 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_5_train_amp_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_5_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..460df9e5afa55fe951948748f6a93b9abf1c9c25 --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_5_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_5_train_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_5_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1dfafb9988c3d6b6a6895a4f49677fff95f137a7 --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x1_5_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x1_5 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x1_5.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_5_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x2_0_train_amp_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x2_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e2d89d92edff3d763238bd660235b1b010e89390 --- /dev/null +++
b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x2_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x2_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ShuffleNet/ShuffleNetV2_x2_0_train_infer_python.txt b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x2_0_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1f9bc867d11b248bdf8ff36bcac223e53b696e9f --- /dev/null +++ b/test_tipc/configs/ShuffleNet/ShuffleNetV2_x2_0_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ShuffleNetV2_x2_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml +null:null +## +===========================infer_params========================== +-o
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ShuffleNet/ShuffleNetV2_x2_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x2_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/SqueezeNet/SqueezeNet1_0_train_amp_infer_python.txt b/test_tipc/configs/SqueezeNet/SqueezeNet1_0_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b52b2d4f7624c8339e32a4897910737ac63262a2 --- /dev/null +++ b/test_tipc/configs/SqueezeNet/SqueezeNet1_0_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:SqueezeNet1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/SqueezeNet/SqueezeNet1_0_train_infer_python.txt b/test_tipc/configs/SqueezeNet/SqueezeNet1_0_train_infer_python.txt new file mode 100644 index
0000000000000000000000000000000000000000..8e0e3f4549cde8e48cc09ee2a5e98b8d7be221d3 --- /dev/null +++ b/test_tipc/configs/SqueezeNet/SqueezeNet1_0_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:SqueezeNet1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_0_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/SqueezeNet/SqueezeNet1_1_train_amp_infer_python.txt b/test_tipc/configs/SqueezeNet/SqueezeNet1_1_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..984ab50474112c1e7a076e0b1b3a44e6d4caf501 --- /dev/null +++ b/test_tipc/configs/SqueezeNet/SqueezeNet1_1_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:SqueezeNet1_1 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +##
+===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_1_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/SqueezeNet/SqueezeNet1_1_train_infer_python.txt b/test_tipc/configs/SqueezeNet/SqueezeNet1_1_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..68d4e8b2ebbf52e13bdbeb064b9ec70352a34924 --- /dev/null +++ b/test_tipc/configs/SqueezeNet/SqueezeNet1_1_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:SqueezeNet1_1 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SqueezeNet/SqueezeNet1_1.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_1_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git
a/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_amp_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..6a1d5f7a53cde1169a3f54513fe857b9088e92f9 --- /dev/null +++ b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:SwinTransformer_base_patch4_window12_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window12_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ec50de2810851556aa0cf717ff3ea23fc9259f51 --- /dev/null +++ b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window12_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:SwinTransformer_base_patch4_window12_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window12_384_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,384,384]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_amp_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..527d016ed0567c2b52bd003b7c787998037c46a4
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SwinTransformer_base_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1ceef499a3247a586deca6e0a2583f9314a2ee3e
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_base_patch4_window7_224_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SwinTransformer_base_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_base_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_amp_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3b74e40a6da1cba8b506b865123b4699a321fda9
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SwinTransformer_large_patch4_window12_384
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:2
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..00b510c64844c458cd9fbe4967c26e73d0a067a7
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window12_384_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SwinTransformer_large_patch4_window12_384
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:2
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,384,384]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_amp_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a451044f7d7db7dc177f4aa98968a5959a0d77ec
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SwinTransformer_large_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window7_224_22kto1k_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c23cf2ae6b60c904fd9783b3619e004057c3457f
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_large_patch4_window7_224_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SwinTransformer_large_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window7_224_22kto1k_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_amp_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8f69c4af2e3aec726bbe3003654f18e1b9dbc6a1
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SwinTransformer_small_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_small_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9424b2c711f60c74a5872adfa7e8e3b3d04699b1
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_small_patch4_window7_224_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:SwinTransformer_small_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_small_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7282e64bd9b5cbf588c395fd5997a8702a5f078f
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_KL
+cpp_infer_type:cls
+cls_inference_model_dir:./SwinTransformer_tiny_patch4_window7_224_kl_quant_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..efa3cd2065cda78fddb0cea1f8dadadf2632fc50
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipeline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..45486c3f6f46f377d40e55db75a3a9bd10963de6
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_KL_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_KL
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_kl_quant_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipeline:pipeline_http_client.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..820f397a26ab5550b72387415973694cf2a8b75d
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_PACT
+cpp_infer_type:cls
+cls_inference_model_dir:./SwinTransformer_tiny_patch4_window7_224_pact_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_pact_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a0e017e5aac0bb1032066739725dd519e8c9bfaa
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipeline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..23602963966e3387b85404b9d4edad1fd7a75e87
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_PACT_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224_PACT
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/SwinTransformer_tiny_patch4_window7_224_pact_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_pact_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipeline:pipeline_http_client.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9f1365975aaabef655b0677b482797b8ae1ea674
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================cpp_infer_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+cpp_infer_type:cls
+cls_inference_model_dir:./SwinTransformer_tiny_patch4_window7_224_infer/
+det_inference_model_dir:
+cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
+det_inference_url:
+infer_quant:False
+inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
+use_gpu:True|False
+enable_mkldnn:False
+cpu_threads:1
+batch_size:1
+use_tensorrt:False
+precision:fp32
+image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+benchmark:False
+generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..82ea03ce699bf8d89ef1a85c1ba2b05917daab8b
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,16 @@
+===========================paddle2onnx_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+2onnx: paddle2onnx
+--model_dir:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./deploy/models/SwinTransformer_tiny_patch4_window7_224_infer/inference.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
+inference:./python/predict_cls.py
+Global.use_onnx:True
+Global.inference_model_dir:./models/SwinTransformer_tiny_patch4_window7_224_infer
+Global.use_gpu:False
+-c:configs/inference_cls.yaml
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5b5a2a1acdcf8c2c352b53f28dc325b933ef1053
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_client/
+serving_dir:./deploy/paddleserving
+web_service:null
+--use_gpu:0|null
+pipeline:test_cpp_serving_client.py
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3d6389e02e41b64539ebd8283b3a1db9fbf0c767
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,14 @@
+===========================serving_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
+trans_model:-m paddle_serving_client.convert
+--dirname:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_serving/
+--serving_client:./deploy/paddleserving/SwinTransformer_tiny_patch4_window7_224_client/
+serving_dir:./deploy/paddleserving
+web_service:classification_web_service.py
+--use_gpu:0|null
+pipeline:pipeline_http_client.py
\ No newline at end of file
diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_lite_arm_cpu_cpp.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_lite_arm_cpu_cpp.txt
similarity index 100%
rename from test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_lite_arm_cpu_cpp.txt
rename to test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_lite_arm_cpu_cpp.txt
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1f8dbe26a1a7ada3cd09090a395dd159736e0c14
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:64|104
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a8ed5d95fc0eff39cd858f54052a3ee97d2cfc6e
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+gpu_list:192.168.0.1,192.168.0.2;0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:64|104
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b61bc7705d5bc2015073ef1f58989584b539150c
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:6
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_pact_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_pact_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..536c848b3879a7ef1be734789e628d3c12b839aa
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_pact_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:pact_train
+norm_train:null
+pact_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.01 -o Global.pretrained_model="pretrained_model/SwinTransformer_tiny_patch4_window7_224_pretrained"
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Slim.quant.name=pact
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:null
+quant_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Slim.quant.name=pact
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_tiny_patch4_window7_224_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:64|104
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_ptq_infer_python.txt b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_ptq_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c87459d2e4ddac829d92931776cee8c8731f834c
--- /dev/null
+++ b/test_tipc/configs/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_ptq_infer_python.txt
@@ -0,0 +1,60 @@
+===========================train_params===========================
+model_name:SwinTransformer_tiny_patch4_window7_224
+python:python3.7
+gpu_list:0
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.save_inference_dir=./SwinTransformer_tiny_patch4_window7_224_infer
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar
+infer_model:./SwinTransformer_tiny_patch4_window7_224_infer/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:64|104
+fp_items:fp32
+epoch:1
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
\ No newline at end of file
diff --git a/test_tipc/configs/TNT/TNT_small_train_amp_infer_python.txt b/test_tipc/configs/TNT/TNT_small_train_amp_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..749043a2d560e1253cee9c884bf2da06ca79a312
--- /dev/null
+++ b/test_tipc/configs/TNT/TNT_small_train_amp_infer_python.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:TNT_small
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:amp_train
+amp_train:tools/train.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1|16
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
+null:null
diff --git a/test_tipc/configs/TNT/TNT_small_train_infer_python.txt b/test_tipc/configs/TNT/TNT_small_train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6fe44c0473277e19acb49d78a5ecc62d5a7447eb
--- /dev/null
+++ b/test_tipc/configs/TNT/TNT_small_train_infer_python.txt
@@ -0,0 +1,54 @@
+===========================train_params===========================
+model_name:TNT_small
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/TNT/TNT_small.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/TNT_small_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Twins/alt_gvt_base_train_amp_infer_python.txt b/test_tipc/configs/Twins/alt_gvt_base_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b8e00798bc6dced253cca37b8e53bd1cea86211a --- /dev/null +++ b/test_tipc/configs/Twins/alt_gvt_base_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:alt_gvt_base +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Twins/alt_gvt_base_train_infer_python.txt b/test_tipc/configs/Twins/alt_gvt_base_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..510e3300c79aaef99d399b5514e97a847a8e21ce --- /dev/null +++ b/test_tipc/configs/Twins/alt_gvt_base_train_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:alt_gvt_base +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o 
DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:64|144 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/Twins/alt_gvt_large_train_amp_infer_python.txt b/test_tipc/configs/Twins/alt_gvt_large_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..b88badd3100fcbf44f1ef9290ebcc3ccf6541066 --- /dev/null +++ b/test_tipc/configs/Twins/alt_gvt_large_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:alt_gvt_large +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml +null:null +## 
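
Aside on the file format: every *_train_infer_python.txt and *_train_amp_infer_python.txt added in this patch follows the same TIPC layout. "===...===" headers and bare "##" lines delimit sections, each line is a "key:value" pair, "null:null" lines are positional placeholders, and "|" separates the alternative values a test sweeps over (e.g. "gpu_list:0|0,1", "-o Global.use_gpu:True|False"). The actual consumer is the bash driver test_tipc/test_train_inference_python.sh, which reads fields by line position; the sketch below is only an illustrative Python rendering of that format, not the repo's parser.

```python
# Illustrative parser for the TIPC key:value layout used by the files in this
# patch. The real consumer (test_tipc/test_train_inference_python.sh) reads
# fields positionally in bash; this sketch only mirrors the format.
from typing import Dict, List

def parse_tipc_config(path: str) -> List[Dict[str, List[str]]]:
    sections: List[Dict[str, List[str]]] = []
    current: Dict[str, List[str]] = {}
    with open(path) as f:
        for raw in f:
            line = raw.rstrip("\n")
            # '=====...=====' headers and bare '##' lines both close a section.
            if line.startswith("=====") or line == "##":
                if current:
                    sections.append(current)
                    current = {}
                continue
            key, _, value = line.partition(":")
            # '|' separates the alternative values a run sweeps over,
            # e.g. 'gpu_list:0|0,1'. Repeated placeholder keys such as 'null'
            # overwrite each other here; the positional bash reader does not
            # have that problem.
            current[key] = value.split("|")
    if current:
        sections.append(current)
    return sections
```

Applied to, say, test_tipc/configs/Twins/alt_gvt_base_train_infer_python.txt, the first returned section carries the train_params keys used by the command-assembly sketch further down.
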
+===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_large_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Twins/alt_gvt_large_train_infer_python.txt b/test_tipc/configs/Twins/alt_gvt_large_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f33ed8b342e5716ab2644851cedd8d3818ef7d0b --- /dev/null +++ b/test_tipc/configs/Twins/alt_gvt_large_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:alt_gvt_large +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_large_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Twins/alt_gvt_small_train_amp_infer_python.txt b/test_tipc/configs/Twins/alt_gvt_small_train_amp_infer_python.txt new file mode 100644 index 
0000000000000000000000000000000000000000..c6fe05e86e04f47cbc6a4088c1b249fde4db8e23 --- /dev/null +++ b/test_tipc/configs/Twins/alt_gvt_small_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:alt_gvt_small +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_small_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Twins/alt_gvt_small_train_infer_python.txt b/test_tipc/configs/Twins/alt_gvt_small_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..930eeee78615a9274f6508be243cf2adb34c7c8e --- /dev/null +++ b/test_tipc/configs/Twins/alt_gvt_small_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:alt_gvt_small +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml +null:null +## +===========================infer_params========================== +-o 
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/alt_gvt_small_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Twins/pcpvt_base_train_amp_infer_python.txt b/test_tipc/configs/Twins/pcpvt_base_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..52ff321b7cc9e9871080af33062d9116b31e3713 --- /dev/null +++ b/test_tipc/configs/Twins/pcpvt_base_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:pcpvt_base +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Twins/pcpvt_base_train_infer_python.txt b/test_tipc/configs/Twins/pcpvt_base_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..92b13842cb6cb8923b2903b475aa4c4528fecbf8 --- 
/dev/null +++ b/test_tipc/configs/Twins/pcpvt_base_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:pcpvt_base +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_base_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Twins/pcpvt_large_train_amp_infer_python.txt b/test_tipc/configs/Twins/pcpvt_large_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..abd2b13006c1003d8f00c9919a6ad53d9f117cb9 --- /dev/null +++ b/test_tipc/configs/Twins/pcpvt_large_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:pcpvt_large +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml +null:null +## 
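
Continuing the aside above: within train_params, the trainer key selects the base command (norm_train or amp_train), and the "-o key:value" lines supply per-run overrides, with values such as "lite_train_lite_infer=2|whole_train_whole_infer=120" chosen by test mode. A hedged sketch of that assembly follows; the real logic lives in the bash driver, and gpu_list/distributed-launch handling is omitted here.

```python
# Sketch: assemble one training command from a parsed train_params section
# (see parse_tipc_config above). Illustrative only; the bash driver also
# handles gpu_list, distributed launch, and status logging.
from typing import Dict, List

def build_train_cmd(params: Dict[str, List[str]],
                    mode: str = "lite_train_lite_infer") -> str:
    def pick(values: List[str]) -> str:
        # 'lite_train_lite_infer=2|whole_train_whole_infer=120' -> pick by mode.
        for v in values:
            if v.startswith(mode + "="):
                return v.split("=", 1)[1]
        return values[0]  # otherwise take the first alternative

    python = params["python"][0]       # e.g. 'python3.7'
    trainer = params["trainer"][0]     # 'norm_train' or 'amp_train'
    base = params[trainer][0]          # the full tools/train.py invocation
    overrides = " ".join(
        f"-o {key[3:]}={pick(values)}" # strip the '-o ' prefix from the key
        for key, values in params.items()
        if key.startswith("-o ") and pick(values) != "null"
    )
    return f"{python} {base} {overrides}"
```

For pcpvt_base in lite_train_lite_infer mode this yields, roughly and up to option order, "python3.7 tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_base.yaml ... -o Global.device=gpu -o Global.epochs=2 -o DataLoader.Train.sampler.batch_size=8".
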
+===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_large_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Twins/pcpvt_large_train_infer_python.txt b/test_tipc/configs/Twins/pcpvt_large_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..71faf213630a39eb1bdc567fec2593bfa66b39f9 --- /dev/null +++ b/test_tipc/configs/Twins/pcpvt_large_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:pcpvt_large +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_large.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_large_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Twins/pcpvt_small_train_amp_infer_python.txt b/test_tipc/configs/Twins/pcpvt_small_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f82807b77c6c213bafff8f7f856dad7bdeca296c --- 
/dev/null +++ b/test_tipc/configs/Twins/pcpvt_small_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:pcpvt_small +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_small_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Twins/pcpvt_small_train_infer_python.txt b/test_tipc/configs/Twins/pcpvt_small_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..61e25436ef3f0400748ba311efbc0247d3ccb66d --- /dev/null +++ b/test_tipc/configs/Twins/pcpvt_small_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:pcpvt_small +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Twins/pcpvt_small.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c 
ppcls/configs/ImageNet/Twins/pcpvt_small.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/pcpvt_small_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/VAN/VAN_tiny_train_infer_python.txt b/test_tipc/configs/VAN/VAN_tiny_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..82fc3845d7fa6ac6cf5929bf82932a8d2a09a2ea --- /dev/null +++ b/test_tipc/configs/VAN/VAN_tiny_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:VAN_tiny +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VAN/VAN_tiny.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VAN/VAN_tiny.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VAN/VAN_tiny.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +inference_dir:null +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=248 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.5,0.5,0.5] -o PreProcess.transform_ops.2.NormalizeImage.std=[0.5,0.5,0.5] +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/configs/VGG/VGG11_train_amp_infer_python.txt b/test_tipc/configs/VGG/VGG11_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..277faf43d80ed5aa68abe84e839e34d021210877 --- 
/dev/null +++ b/test_tipc/configs/VGG/VGG11_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:VGG11 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG11_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VGG/VGG11_train_infer_python.txt b/test_tipc/configs/VGG/VGG11_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..149234c7ba4d84694a5a713e1dde02acd9dce8ee --- /dev/null +++ b/test_tipc/configs/VGG/VGG11_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:VGG11 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG11.yaml +quant_export:null +fpgm_export:null 
+distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG11_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/VGG/VGG13_train_amp_infer_python.txt b/test_tipc/configs/VGG/VGG13_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c74c089fe976192796499201ba064c14c6d93aa8 --- /dev/null +++ b/test_tipc/configs/VGG/VGG13_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:VGG13 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG13_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VGG/VGG13_train_infer_python.txt b/test_tipc/configs/VGG/VGG13_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5b8486071ef1243122186f406fe5cf8576483c70 --- /dev/null +++ b/test_tipc/configs/VGG/VGG13_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:VGG13 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o 
Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG13.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG13_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/VGG/VGG16_train_amp_infer_python.txt b/test_tipc/configs/VGG/VGG16_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..bc21033eb7dfb095c093af73010633187439c840 --- /dev/null +++ b/test_tipc/configs/VGG/VGG16_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:VGG16 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null 
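
The inference half of each config works the same way: the inference key holds the predict_cls.py base command, and the "-o key:v1|v2" lines after it define the grid of runtime settings the harness iterates over, wide in the amp variants (True|False sweeps for GPU, MKLDNN, TensorRT, FP16; 1|6 threads; 1|16 batch) and pinned to a single CPU setting in the plain variants. A sketch of that sweep, assuming a dict holding only the "inference" line and the "-o" lines that follow it; the real bash loop also skips invalid combinations such as TensorRT without GPU.

```python
# Sketch: enumerate inference commands from the '-o key:v1|v2' grid of an
# infer_params section. Assumes 'params' holds the 'inference' key plus only
# the '-o' override lines after it; illustrative, not the bash driver.
import itertools
from typing import Dict, Iterator, List

def iter_infer_cmds(params: Dict[str, List[str]],
                    python: str = "python3.7") -> Iterator[str]:
    # e.g. 'python/predict_cls.py -c configs/inference_cls.yaml'
    base = params["inference"][0]
    keys = [k for k in params if k.startswith("-o ")]
    # Cartesian product over every alternative of every flag.
    for combo in itertools.product(*(params[k] for k in keys)):
        overrides = " ".join(
            f"-o {k[3:]}={v}" for k, v in zip(keys, combo) if v != "null"
        )
        yield f"{python} {base} {overrides}"
```
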
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG16_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VGG/VGG16_train_infer_python.txt b/test_tipc/configs/VGG/VGG16_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..74a0c6af81faddd0f5ddfc973f49ea26be3c82a8 --- /dev/null +++ b/test_tipc/configs/VGG/VGG16_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:VGG16 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG16.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG16_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/VGG/VGG19_train_amp_infer_python.txt b/test_tipc/configs/VGG/VGG19_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d76977edebe859d65def298128d8cf0c90f02440 --- /dev/null +++ b/test_tipc/configs/VGG/VGG19_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:VGG19 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o 
DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG19_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VGG/VGG19_train_infer_python.txt b/test_tipc/configs/VGG/VGG19_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1518deb53fccf0beb75b1fcaf1e51859633921b --- /dev/null +++ b/test_tipc/configs/VGG/VGG19_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:VGG19 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VGG/VGG19.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG19_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o 
Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/VisionTransformer/ViT_base_patch16_224_train_amp_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_base_patch16_224_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..22fd9e2eca3eff132224dc3b3a1d44601c3bd287 --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_base_patch16_224_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ViT_base_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VisionTransformer/ViT_base_patch16_224_train_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_base_patch16_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7edde6d29c2db05537ebaa7151c30d8db9a05466 --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_base_patch16_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ViT_base_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o 
Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/VisionTransformer/ViT_base_patch16_384_train_amp_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_base_patch16_384_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ad27bc3fa76ba4f27c01fd58b8a2f5b49f10728b --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_base_patch16_384_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ViT_base_patch16_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml +null:null +## +===========================infer_params========================== +-o 
Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VisionTransformer/ViT_base_patch16_384_train_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_base_patch16_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..92a0ebf1e8adb8afba3c9731fcbd19380805c80e --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_base_patch16_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ViT_base_patch16_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch16_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch16_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== 
+random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/configs/VisionTransformer/ViT_base_patch32_384_train_amp_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_base_patch32_384_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c4d1c13ce710cffe98256ee43cce58385cac7506 --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_base_patch32_384_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ViT_base_patch32_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch32_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VisionTransformer/ViT_base_patch32_384_train_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_base_patch32_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..1347eaa3d5d7692e40fb934911612f88c9b75e2d --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_base_patch32_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ViT_base_patch32_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c 
ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_base_patch32_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_base_patch32_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/configs/VisionTransformer/ViT_large_patch16_224_train_amp_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_large_patch16_224_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2b5221f7bcb08836118bdc1bd11959a20564913a --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_large_patch16_224_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ViT_large_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml +quant_export:null +fpgm_export:null 
+distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VisionTransformer/ViT_large_patch16_224_train_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_large_patch16_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..293e83d3ad1870932c2c8111084915d6423d6b42 --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_large_patch16_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ViT_large_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/VisionTransformer/ViT_large_patch16_384_train_amp_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_large_patch16_384_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..547596aee9580db7f462bf1a5a9b9ecebfc27758 --- /dev/null +++ 
b/test_tipc/configs/VisionTransformer/ViT_large_patch16_384_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ViT_large_patch16_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VisionTransformer/ViT_large_patch16_384_train_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_large_patch16_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..bd228e38106ee4bdacecf30732aa4fa552f65b43 --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_large_patch16_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ViT_large_patch16_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## 
+===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch16_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch16_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/configs/VisionTransformer/ViT_large_patch32_384_train_amp_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_large_patch32_384_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c667e953b14e8212de5bfe3f8f02fa5be8dada2 --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_large_patch32_384_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ViT_large_patch32_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch32_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o 
PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VisionTransformer/ViT_large_patch32_384_train_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_large_patch32_384_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c97c77b2fb97c16a107447e1197a8e74346b2cad --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_large_patch32_384_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ViT_large_patch32_384 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:2 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_large_patch32_384.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_large_patch32_384_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=384 -o PreProcess.transform_ops.1.CropImage.size=384 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,384,384]}] \ No newline at end of file diff --git a/test_tipc/configs/VisionTransformer/ViT_small_patch16_224_train_amp_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_small_patch16_224_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..6004106ee3e2a78e9f38d4525afc89b65d0ab941 --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_small_patch16_224_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== 
+model_name:ViT_small_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_small_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/VisionTransformer/ViT_small_patch16_224_train_infer_python.txt b/test_tipc/configs/VisionTransformer/ViT_small_patch16_224_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..46dc2b767b48e66bd1f033218a7e2429f74a111d --- /dev/null +++ b/test_tipc/configs/VisionTransformer/ViT_small_patch16_224_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:ViT_small_patch16_224 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: 
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/VisionTransformer/ViT_small_patch16_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ViT_small_patch16_224_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/configs/Xception/Xception41_deeplab_train_amp_infer_python.txt b/test_tipc/configs/Xception/Xception41_deeplab_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f37bc8395d261777fddd811aa205044794400f7d --- /dev/null +++ b/test_tipc/configs/Xception/Xception41_deeplab_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:Xception41_deeplab +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_deeplab_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Xception/Xception41_deeplab_train_infer_python.txt 
b/test_tipc/configs/Xception/Xception41_deeplab_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..9cf7e7e2e9959d871376edb61dd44b98d81609d4 --- /dev/null +++ b/test_tipc/configs/Xception/Xception41_deeplab_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:Xception41_deeplab +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41_deeplab.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_deeplab_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/configs/Xception/Xception41_train_amp_infer_python.txt b/test_tipc/configs/Xception/Xception41_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f5c875bcaa29e04d45665ec95f31159d59b21d21 --- /dev/null +++ b/test_tipc/configs/Xception/Xception41_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:Xception41 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o 
AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Xception/Xception41_train_infer_python.txt b/test_tipc/configs/Xception/Xception41_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..55bf6b9f002c4f313ade9fdfac0ae0553d84046b --- /dev/null +++ b/test_tipc/configs/Xception/Xception41_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:Xception41 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception41.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception41_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o 
Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/configs/Xception/Xception65_deeplab_train_amp_infer_python.txt b/test_tipc/configs/Xception/Xception65_deeplab_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..9d524cb6d65a91633f96f30e52bf88fc6e02f970 --- /dev/null +++ b/test_tipc/configs/Xception/Xception65_deeplab_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:Xception65_deeplab +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_deeplab_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Xception/Xception65_deeplab_train_infer_python.txt b/test_tipc/configs/Xception/Xception65_deeplab_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..e84160070a815e6bd261e8a5a02ba05e5369c3b5 --- /dev/null +++ b/test_tipc/configs/Xception/Xception65_deeplab_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:Xception65_deeplab +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o 
Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65_deeplab.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_deeplab_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/configs/Xception/Xception65_train_amp_infer_python.txt b/test_tipc/configs/Xception/Xception65_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..228e5bf07725d6b1c5ccd95555e9c27698bc821c --- /dev/null +++ b/test_tipc/configs/Xception/Xception65_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:Xception65 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml +quant_export:null +fpgm_export:null 
+distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Xception/Xception65_train_infer_python.txt b/test_tipc/configs/Xception/Xception65_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..bf27339bc2066f887ba78deb7f7be2f7ff09afe0 --- /dev/null +++ b/test_tipc/configs/Xception/Xception65_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:Xception65 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception65.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception65_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,299,299]}] \ No newline at end of file diff --git a/test_tipc/configs/Xception/Xception71_train_amp_infer_python.txt b/test_tipc/configs/Xception/Xception71_train_amp_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..472d4bae3645df758e6ca28e69f26d055d06b596 --- /dev/null +++ 
b/test_tipc/configs/Xception/Xception71_train_amp_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:Xception71 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:amp_train +amp_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=128 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception71_pretrained.pdparams +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:True|False +-o Global.cpu_num_threads:1|6 +-o Global.batch_size:1|16 +-o Global.use_tensorrt:True|False +-o Global.use_fp16:True|False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val +-o Global.save_log_path:null +-o Global.benchmark:True +null:null +null:null diff --git a/test_tipc/configs/Xception/Xception71_train_infer_python.txt b/test_tipc/configs/Xception/Xception71_train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..eeada259ff0cd20b472e9400555da222daf2baf5 --- /dev/null +++ b/test_tipc/configs/Xception/Xception71_train_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:Xception71 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml +null:null +## +===========================infer_params========================== +-o 
Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/Xception/Xception71.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+kl_quant:null
+export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Xception71_pretrained.pdparams
+infer_model:../inference/
+infer_export:True
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=320 -o PreProcess.transform_ops.1.CropImage.size=299
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:False
+-o Global.cpu_num_threads:1
+-o Global.batch_size:1
+-o Global.use_tensorrt:False
+-o Global.use_fp16:False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
+-o Global.save_log_path:null
+-o Global.benchmark:False
+null:null
+null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,299,299]}]
\ No newline at end of file
diff --git a/test_tipc/docs/benchmark_train.md b/test_tipc/docs/benchmark_train.md
index 20cf9287423616103609187d2104d68c77f34650..06c4df3d380cdb76aec1b34ca7eb07b689d83df5 100644
--- a/test_tipc/docs/benchmark_train.md
+++ b/test_tipc/docs/benchmark_train.md
@@ -17,7 +17,7 @@ bash test_tipc/prepare.sh test_tipc/configs/MobileNetV2/MobileNetV2_train_infer_
 ```shell
 # Usage: bash test_tipc/benchmark_train.sh train_benchmark.txt mode
-bash test_tipc/benchmark_train.sh test_tipc/config/MobileNetV2/MobileNetV2_train_infer_python.txt benchmark_train
+bash test_tipc/benchmark_train.sh test_tipc/configs/MobileNetV2/MobileNetV2_train_infer_python.txt benchmark_train
 ```
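Every TIPC config above follows the same plain-text protocol: one `key:value` entry per line, `null` apparently marking an unused slot, and a `|` inside a value listing the alternatives the runner iterates over, so `-o Global.batch_size:1|16` is tested once with batch size 1 and once with 16. Below is a rough, hypothetical sketch of expanding such a line; the real parsing lives in the `test_tipc` runner scripts, and the variable names here are invented for illustration.

```shell
# Illustrative only: split one TIPC "key:value" line on the first colon and
# expand the "|"-separated alternatives in its value.
line="-o Global.batch_size:1|16"
key="${line%%:*}"        # -> "-o Global.batch_size"
values="${line#*:}"      # -> "1|16"
for v in ${values//|/ }; do
    echo "would run once with ${key}=${v}"
done
```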
diff --git a/test_tipc/docs/test_inference_cpp.md b/test_tipc/docs/test_inference_cpp.md
index eabf8774d37454461fe28a5fc1d5ed0b3135fcad..256e6a5f4d10d2e64e71778fe43f84c1784b3000 100644
--- a/test_tipc/docs/test_inference_cpp.md
+++ b/test_tipc/docs/test_inference_cpp.md
@@ -1,86 +1,323 @@
-# C++ inference functionality test
+# Linux GPU/CPU C++ inference functionality test
 
-The main program of the C++ inference functionality test is `test_inference_cpp.sh`, which tests model inference based on the C++ inference library.
+The main program of the Linux GPU/CPU C++ inference functionality test is `test_inference_cpp.sh`, which tests inference based on the C++ prediction engine.
 
 ## 1. Summary of supported tests
 
-Depending on whether quantization was used during training, the tested models fall into `normal models` and `quantized models`; the C++ inference functionality of the two types is summarized below:
+- Inference:
 
-| model type | device | batchsize | tensorrt | mkldnn | CPU multi-threading |
-| ---- | ---- | ---- | :----: | :----: | :----: |
-| normal model | GPU | 1/6 | fp32/fp16 | - | - |
-| normal model | CPU | 1/6 | - | fp32 | supported |
-| quantized model | GPU | 1/6 | int8 | - | - |
-| quantized model | CPU | 1/6 | - | int8 | supported |
+| Algorithm | Model | device_CPU | device_GPU |
+| :-------------: | :------------------------------------------: | :--------: | :--------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | Supported | Supported |
+| MobileNetV3 | MobileNetV3_large_x1_0_KL | Supported | Supported |
+| MobileNetV3 | MobileNetV3_large_x1_0_PACT | Supported | Supported |
+| PP-ShiTu | PPShiTu_general_rec, PPShiTu_mainbody_det | Supported | Supported |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_KL | Supported | Supported |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_PACT | Supported | Supported |
+| PPHGNet | PPHGNet_small | Supported | Supported |
+| PPHGNet | PPHGNet_small_KL | Supported | Supported |
+| PPHGNet | PPHGNet_small_PACT | Supported | Supported |
+| PPHGNet | PPHGNet_tiny | Supported | Supported |
+| PPLCNet | PPLCNet_x0_25 | Supported | Supported |
+| PPLCNet | PPLCNet_x0_35 | Supported | Supported |
+| PPLCNet | PPLCNet_x0_5 | Supported | Supported |
+| PPLCNet | PPLCNet_x0_75 | Supported | Supported |
+| PPLCNet | PPLCNet_x1_0 | Supported | Supported |
+| PPLCNet | PPLCNet_x1_0_KL | Supported | Supported |
+| PPLCNet | PPLCNet_x1_0_PACT | Supported | Supported |
+| PPLCNet | PPLCNet_x1_5 | Supported | Supported |
+| PPLCNet | PPLCNet_x2_0 | Supported | Supported |
+| PPLCNet | PPLCNet_x2_5 | Supported | Supported |
+| PPLCNetV2 | PPLCNetV2_base | Supported | Supported |
+| PPLCNetV2 | PPLCNetV2_base_KL | Supported | Supported |
+| ResNet | ResNet50 | Supported | Supported |
+| ResNet | ResNet50_vd | Supported | Supported |
+| ResNet | ResNet50_vd_KL | Supported | Supported |
+| ResNet | ResNet50_vd_PACT | Supported | Supported |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | Supported | Supported |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_KL | Supported | Supported |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_PACT | Supported | Supported |
 
-## 2. Test workflow
-For runtime configuration, set up the TIPC environment as described in the [documentation](./install.md).
+## 2. Test workflow (taking **ResNet50** as an example)
 
-### 2.1 Functionality test
-First run `prepare.sh` to prepare the data and model, then run `test_inference_cpp.sh`; log files with the `cpp_infer_*.log` suffix are finally generated under the ```test_tipc/output``` directory.
+<details>
+<summary>Prepare the data, prepare the inference model, compile opencv, compile (or download) Paddle Inference, and compile the C++ inference demo (all automated in prepare.sh; click to expand or collapse)</summary>
+
+### 2.1 Prepare the data and the inference model
+
+#### 2.1.1 Prepare the data
+
+By default, `./deploy/images/ILSVRC2012_val_00000010.jpeg` is used as the test input image.
+
+#### 2.1.2 Prepare the inference model
+
+* If a model has already been trained, refer to [model export](../../docs/zh_CN/inference_deployment/export_model.md) to export an `inference model`, and set the export path to `./deploy/models/ResNet50_infer`.
+After export, the file structure is as follows:
 
 ```shell
-bash test_tipc/prepare.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt cpp_infer
+./deploy/models/ResNet50_infer/
+├── inference.pdmodel
+├── inference.pdiparams
+└── inference.pdiparams.info
+```
+
+### 2.2 Prepare the environment
+
+#### 2.2.1 Runtime preparation
+
+Set up a suitable compilation and execution environment, including the compiler and basic libraries such as cuda; installing the docker environment is recommended ([reference link](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/docker/linux-docker.html)).
 
-# Usage 1:
-bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
-# Usage 2: run on a specified GPU card; the third argument is the GPU card id
-bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 1
+#### 2.2.2 Compile the opencv library
+
+* First download the Linux source code from the opencv website; taking version 3.4.7 as an example, the download and extraction commands are:
+
+```
+cd deploy/cpp
+wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz
+tar -xvf 3.4.7.tar.gz
 ```
 
-After the inference command runs, logs are automatically saved under the `test_tipc/output` folder, including the following files:
+* To compile opencv, first set the opencv source path (`root_path`) and the installation path (`install_path`): `root_path` is the downloaded opencv source path and `install_path` is where opencv will be installed. In this example the source path is `opencv-3.4.7/` under the current directory.
 
 ```shell
-test_tipc/output/
-|- results_cpp.log  # log of the run status of each command
-|- cls_cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log  # CPU inference log: MKL-DNN off, 1 thread, batch_size=1
-|- cls_cpp_infer_cpu_usemkldnn_False_threads_6_precision_fp32_batchsize_1.log  # CPU inference log: MKL-DNN off, 6 threads, batch_size=1
-|- cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log  # GPU inference log: TensorRT off, fp32, batch_size=1
-|- cls_cpp_infer_gpu_usetrt_True_precision_fp16_batchsize_1.log   # GPU inference log: TensorRT on, fp16, batch_size=1
-......
+cd ./opencv-3.4.7
+export root_path=$PWD
+export install_path=${root_path}/opencv3
 ```
 
-results_cpp.log contains the run status of every command; on success it prints:
+* Then, inside the opencv source path, compile as follows:
+
+```shell
+rm -rf build
+mkdir build
+cd build
+
+cmake .. \
+    -DCMAKE_INSTALL_PREFIX=${install_path} \
+    -DCMAKE_BUILD_TYPE=Release \
+    -DBUILD_SHARED_LIBS=OFF \
+    -DWITH_IPP=OFF \
+    -DBUILD_IPP_IW=OFF \
+    -DWITH_LAPACK=OFF \
+    -DWITH_EIGEN=OFF \
+    -DCMAKE_INSTALL_LIBDIR=lib64 \
+    -DWITH_ZLIB=ON \
+    -DBUILD_ZLIB=ON \
+    -DWITH_JPEG=ON \
+    -DBUILD_JPEG=ON \
+    -DWITH_PNG=ON \
+    -DBUILD_PNG=ON \
+    -DWITH_TIFF=ON \
+    -DBUILD_TIFF=ON
+
+make -j
+make install
+```
-```
-Run successfully with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml 2>&1|tee test_tipc/output/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log
-......
-```
+
+* After `make install` finishes, opencv header and library files are generated in that folder and are used by the later code compilation.
+
+Taking opencv 3.4.7 as an example, the final file structure under the installation path is shown below. **Note**: the structure below may differ for other opencv versions.
+
+```shell
+opencv3/
+├── bin     : executables
+├── include : header files
+├── lib64   : library files
+└── share   : some third-party libraries
+```
+
+#### 2.2.3 Download or compile the Paddle inference library
+
+* There are two ways to obtain the Paddle inference library, described in detail below.
+
+##### Compile the inference library from source
+* To get the latest inference library features, clone the latest Paddle code from github and compile the library from source.
+* Refer to the instructions on the [Paddle inference library website](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16) to fetch the Paddle code from github and compile the latest inference library. The git command is:
+
+```shell
+git clone https://github.com/PaddlePaddle/Paddle.git
+```
+
+* After entering the Paddle directory, compile with:
+
+```shell
+rm -rf build
+mkdir build
+cd build
+
+cmake .. \
+    -DWITH_CONTRIB=OFF \
+    -DWITH_MKL=ON \
+    -DWITH_MKLDNN=ON \
+    -DWITH_TESTING=OFF \
+    -DCMAKE_BUILD_TYPE=Release \
+    -DWITH_INFERENCE_API_TEST=OFF \
+    -DON_INFER=ON \
+    -DWITH_PYTHON=ON
+make -j
+make inference_lib_dist
+```
-On failure it prints:
-```
-Run failed with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml 2>&1|tee test_tipc/output/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log
-......
-```
-This makes it easy to tell from results_cpp.log which command failed.
+
+More build options are listed on the Paddle C++ inference library website: [https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16).
+
+* After compilation, the following files and folders are generated under `build/paddle_inference_install_dir/`:
+
+```
+build/paddle_inference_install_dir/
+├── CMakeCache.txt
+├── paddle
+├── third_party
+└── version.txt
+```
+
+`paddle` is the Paddle library needed later for C++ inference, and `version.txt` contains the version information of the current inference library.
+
+##### Direct download and installation
 
-### 2.2 Accuracy test
-
-The compare_results.py script checks whether the model predictions meet expectations. The main steps are:
-- extract the predicted coordinates from the logs;
-- extract the saved coordinate results from local files;
-- compare the two results against the accuracy expectation; an error is raised when the difference exceeds the configured thresholds.
+* The [Paddle inference library website](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) provides Linux inference libraries for different cuda versions; check the website and choose a suitable version.
+
+  Taking the `manylinux_cuda10.1_cudnn7.6_avx_mkl_trt6_gcc8.2` version as an example, download and extract it with:
+
+```shell
+wget https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
+
+tar -xvf paddle_inference.tgz
+```
+
+This finally generates a `paddle_inference/` subfolder in the current folder, whose contents are identical to the paddle_inference_install_dir described above.
+
+#### 2.2.4 Compile the C++ inference demo
+
+* The build command is shown below; the addresses of the Paddle C++ inference library, opencv, and the other dependencies must be replaced with the actual addresses on your own machine.
+
+```shell
+# run the following command under deploy/cpp
+bash tools/build.sh
+```
+
+Specifically, the content of `tools/build.sh` is:
+
+```shell
+OPENCV_DIR=your_opencv_dir
+LIB_DIR=your_paddle_inference_dir
+CUDA_LIB_DIR=your_cuda_lib_dir
+CUDNN_LIB_DIR=your_cudnn_lib_dir
+TENSORRT_DIR=your_tensorrt_lib_dir
+
+BUILD_DIR=build
+rm -rf ${BUILD_DIR}
+mkdir ${BUILD_DIR}
+cd ${BUILD_DIR}
+cmake .. \
+    -DPADDLE_LIB=${LIB_DIR} \
+    -DWITH_MKL=ON \
+    -DDEMO_NAME=clas_system \
+    -DWITH_GPU=OFF \
+    -DWITH_STATIC_LIB=OFF \
+    -DWITH_TENSORRT=OFF \
+    -DTENSORRT_DIR=${TENSORRT_DIR} \
+    -DOPENCV_DIR=${OPENCV_DIR} \
+    -DCUDNN_LIB=${CUDNN_LIB_DIR} \
+    -DCUDA_LIB=${CUDA_LIB_DIR} \
+
+make -j
+```
+
+In the commands above:
+
+* `OPENCV_DIR` is the path where opencv was compiled and installed (in this example, change it to the path of the `opencv-3.4.7/opencv3` folder);
+
+* `LIB_DIR` is the path of the downloaded Paddle inference library (the `paddle_inference` folder) or of the compiled one (the `build/paddle_inference_install_dir` folder);
+
+* `CUDA_LIB_DIR` is the path of the cuda library files, usually `/usr/local/cuda/lib64` inside docker;
+
+* `CUDNN_LIB_DIR` is the path of the cudnn library files, usually `/usr/lib64` inside docker;
+
+* `TENSORRT_DIR` is the path of the tensorrt library files, usually `/usr/local/TensorRT-7.2.3.4/` inside docker; TensorRT must be used together with a GPU.
+
+After the commands above run and compilation finishes, a `build` folder is generated in the current path, containing an executable named `clas_system`.
+</details>
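Before moving on to the tests, a quick sanity check that the demo actually built can save a debugging round-trip. This is only a sketch, assuming the default paths produced by `tools/build.sh` above:

```shell
# Assumes the build was run from deploy/cpp with BUILD_DIR=build, as above
ls -l deploy/cpp/build/clas_system        # the compiled demo binary should exist
ldd deploy/cpp/build/clas_system | head   # inspect its resolved shared-library dependencies
```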
+
+* The following command can be run to automatically complete everything required in the environment preparation above:
+```shell
+bash test_tipc/prepare.sh test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt cpp_infer
+```
+
+### 2.3 Functionality test
+
+The test method is shown below; to test a different model, simply substitute your own parameter configuration file.
 
-#### Usage
-Run the command:
 ```shell
-python3.7 test_tipc/compare_results.py --gt_file=./test_tipc/results/cls_cpp_*.txt --log_file=./test_tipc/output/cls_cpp_*.log --atol=1e-3 --rtol=1e-3
+bash test_tipc/test_inference_cpp.sh ${your_params_file} cpp_infer
 ```
-Parameters:
-- gt_file: path to the pre-saved ground-truth results; *.txt files are supported and indexed automatically; by default the files live under the test_tipc/results/ folder
-- log_file: path to the prediction logs saved by the infer mode of test_tipc/test_inference_cpp.sh; the logs contain prediction results such as text boxes, predicted text, categories, and so on; the cpp_infer_*.log format is also supported
-- atol: the absolute tolerance
-- rtol: the relative tolerance
+
+Taking the `Linux GPU/CPU C++ inference test` of `ResNet50` as an example, the command is:
 
-#### Results
+```shell
+bash test_tipc/test_inference_cpp.sh test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt cpp_infer
+```
 
-When it runs correctly, the output looks like the figure below:
-
+The output below indicates that the commands ran successfully.
 
-When the results are inconsistent, the output looks like:
-
+```shell
+Run successfully with command - ResNet50 - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cpp_infer/cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log 2>&1!
+Run successfully with command - ResNet50 - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cpp_infer/cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log 2>&1!
+```
+The final log prints the results, as shown below:
+```log
+You are using Paddle compiled with TensorRT, but TensorRT dynamic library is not found. Ignore this if TensorRT is not needed.
+=======Paddle Class inference config======
+Global:
+  infer_imgs: ./deploy/images/ILSVRC2012_val_00000010.jpeg
+  inference_model_dir: ./deploy/models/ResNet50_infer
+  batch_size: 1
+  use_gpu: True
+  enable_mkldnn: True
+  cpu_num_threads: 10
+  enable_benchmark: True
+  use_fp16: False
+  ir_optim: True
+  use_tensorrt: False
+  gpu_mem: 8000
+  enable_profile: False
+PreProcess:
+  transform_ops:
+    - ResizeImage:
+        resize_short: 256
+    - CropImage:
+        size: 224
+    - NormalizeImage:
+        scale: 0.00392157
+        mean: [0.485, 0.456, 0.406]
+        std: [0.229, 0.224, 0.225]
+        order: ""
+        channel_num: 3
+    - ToCHWImage: ~
+PostProcess:
+  main_indicator: Topk
+  Topk:
+    topk: 5
+    class_id_map_file: ./ppcls/utils/imagenet1k_label_list.txt
+  SavePreLabel:
+    save_dir: ./pre_label/
+=======End of Paddle Class inference config======
+img_file_list length: 1
+Current image path: ./deploy/images/ILSVRC2012_val_00000010.jpeg
+Current total inferen time cost: 5449.39 ms.
+	Top1: class_id: 153, score: 0.4144, label: Maltese dog, Maltese terrier, Maltese
+	Top2: class_id: 332, score: 0.3909, label: Angora, Angora rabbit
+	Top3: class_id: 229, score: 0.0514, label: Old English sheepdog, bobtail
+	Top4: class_id: 204, score: 0.0430, label: Lhasa, Lhasa apso
+	Top5: class_id: 265, score: 0.0420, label: toy poodle
+
+```
+The detailed logs are located in `./test_tipc/output/ResNet50/cpp_infer/cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log` and `./test_tipc/output/ResNet50/cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log`.
+
+If a run fails, the failure log and the corresponding command are also printed in the terminal; that command can be used to analyze the cause of the failure.
 
-## 3. More tutorials
-
-This document is for functionality testing; for a more detailed C++ inference tutorial, please refer to: [Server-side C++ inference](../../docs/zh_CN/inference_deployment/)
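To pull a specific prediction line back out of a saved log, a plain `grep` is enough; the path below is simply the GPU log named in the ResNet50 example above.

```shell
# Show the Top-1 prediction recorded in the example GPU inference log
grep "Top1" ./test_tipc/output/ResNet50/cpp_infer/cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log
```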
更多教程
+```
+详细log位于`./test_tipc/output/ResNet50/cpp_infer/cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log`和`./test_tipc/output/ResNet50/cpp_infer/cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log`中。
-本文档为功能测试用,更详细的c++预测使用教程请参考:[服务器端C++预测](../../docs/zh_CN/inference_deployment/)
+如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。
diff --git a/test_tipc/docs/test_lite_arm_cpu_cpp.md b/test_tipc/docs/test_lite_arm_cpu_cpp.md
index eb43a21f48e828744d804de02833f5ed19fa24e8..45e2116b28475c4ee857013269bcb6f8fd36cc59 100644
--- a/test_tipc/docs/test_lite_arm_cpu_cpp.md
+++ b/test_tipc/docs/test_lite_arm_cpu_cpp.md
@@ -15,7 +15,7 @@ Lite_arm_cpp_cpu 预测功能测试的主程序为`test_lite_arm_cpu_cpp.sh`,
 先运行 `prepare_lite_arm_cpu_cpp.sh` 准备数据和模型,然后运行 `test_lite_arm_cpu_cpp.sh` 进行测试,最终在 `./output` 目录下生成 `lite_*.log` 后缀的日志文件。
 
 ```shell
-bash test_tipc/prepare_lite_arm_cpu_cpp.sh test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt
+bash test_tipc/prepare_lite_arm_cpu_cpp.sh test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_lite_arm_cpu_cpp.txt
 ```
 
 运行预测指令后,在`./output`文件夹下自动会保存运行日志,包括以下文件:
diff --git a/test_tipc/docs/test_paddle2onnx.md b/test_tipc/docs/test_paddle2onnx.md
new file mode 100644
index 0000000000000000000000000000000000000000..e705161085191590650af1747071a76f3e3b1398
--- /dev/null
+++ b/test_tipc/docs/test_paddle2onnx.md
@@ -0,0 +1,52 @@
+# Paddle2onnx预测功能测试
+
+Paddle2onnx预测功能测试的主程序为`test_paddle2onnx.sh`,可以测试Paddle2ONNX的模型转化功能,并验证正确性。
+
+## 1. 测试结论汇总
+
+基于训练是否使用量化,进行本测试的模型可以分为`正常模型`和`量化模型`,这两类模型对应的Paddle2ONNX预测功能汇总如下:
+
+| 模型类型 |device |
+| ---- | ---- |
+| 正常模型 | GPU |
+| 正常模型 | CPU |
+
+
+## 2. 测试流程
+
+以下内容以`ResNet50`模型的paddle2onnx测试为例。
+
+### 2.1 功能测试
+先运行`prepare.sh`准备数据和模型,然后运行`test_paddle2onnx.sh`进行测试,最终在`test_tipc/output/ResNet50`目录下生成`paddle2onnx_infer_*.log`后缀的日志文件。
+下方展示以`ResNet50`为例的测试命令与结果。
+
+```shell
+bash test_tipc/prepare.sh ./test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt paddle2onnx_infer
+
+# 用法:
+bash test_tipc/test_paddle2onnx.sh ./test_tipc/configs/ResNet/ResNet50_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
+```
+
+#### 运行结果
+
+各测试的运行情况会打印在 `./test_tipc/output/ResNet50/results_paddle2onnx.log` 中:
+运行成功时会输出:
+
+```
+Run successfully with command - paddle2onnx --model_dir=./deploy/models/ResNet50_infer/ --model_filename=inference.pdmodel --params_filename=inference.pdiparams --save_file=./deploy/models/ResNet50_infer/inference.onnx --opset_version=10 --enable_onnx_checker=True!
+Run successfully with command - cd deploy && python3.7 ./python/predict_cls.py -o Global.inference_model_dir=./models/ResNet50_infer -o Global.use_onnx=True -o Global.use_gpu=False -c=configs/inference_cls.yaml > ../test_tipc/output/ResNet50/paddle2onnx_infer_cpu.log 2>&1 && cd ../!
+
+```
+
+运行失败时会输出:
+
+```
+Run failed with command - paddle2onnx --model_dir=./deploy/models/ResNet50_infer/ --model_filename=inference.pdmodel --params_filename=inference.pdiparams --save_file=./deploy/models/ResNet50_infer/inference.onnx --opset_version=10 --enable_onnx_checker=True!
+Run failed with command - cd deploy && python3.7 ./python/predict_cls.py -o Global.inference_model_dir=./models/ResNet50_infer -o Global.use_onnx=True -o Global.use_gpu=False -c=configs/inference_cls.yaml > ../test_tipc/output/ResNet50/paddle2onnx_infer_cpu.log 2>&1 && cd ../!
+...
+```
+
+
+## 3. 
更多教程 + +本文档为功能测试用,更详细的Paddle2onnx预测使用教程请参考:[Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) diff --git a/test_tipc/docs/test_serving_infer_cpp.md b/test_tipc/docs/test_serving_infer_cpp.md new file mode 100644 index 0000000000000000000000000000000000000000..3ddd0c253b9d596697da8108e9cf563a21bf0cba --- /dev/null +++ b/test_tipc/docs/test_serving_infer_cpp.md @@ -0,0 +1,124 @@ +# Linux GPU/CPU C++ 服务化部署测试 + +Linux GPU/CPU C++ 服务化部署测试的主程序为`test_serving_infer_cpp.sh`,可以测试基于C++的模型服务化部署功能。 + + +## 1. 测试结论汇总 + +- 推理相关: + +| 算法名称 | 模型名称 | device_CPU | device_GPU | +| :-------------: | :------------------------------------------: | :--------: | :--------: | +| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 | +| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 | +| MobileNetV3 | MobileNetV3_large_x1_0_PACT | 支持 | 支持 | +| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 | +| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_KL | 支持 | 支持 | +| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_PACT | 支持 | 支持 | +| PPHGNet | PPHGNet_small | 支持 | 支持 | +| PPHGNet | PPHGNet_small_KL | 支持 | 支持 | +| PPHGNet | PPHGNet_small_PACT | 支持 | 支持 | +| PPHGNet | PPHGNet_tiny | 支持 | 支持 | +| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 | +| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 | +| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 | +| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 | +| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 | +| PPLCNet | PPLCNet_x1_0_KL | 支持 | 支持 | +| PPLCNet | PPLCNet_x1_0_PACT | 支持 | 支持 | +| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 | +| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 | +| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 | +| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 | +| PPLCNetV2 | PPLCNetV2_base_KL | 支持 | 支持 | +| ResNet | ResNet50 | 支持 | 支持 | +| ResNet | ResNet50_vd | 支持 | 支持 | +| ResNet | ResNet50_vd_KL | 支持 | 支持 | +| ResNet | ResNet50_vd_PACT | 支持 | 支持 | +| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 | +| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_KL | 支持 | 支持 | +| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_PACT | 支持 | 支持 | + + +## 2. 
测试流程
+
+### 2.1 准备数据
+
+分类模型默认使用`./deploy/paddleserving/daisy.jpg`作为测试输入图片,无需下载。
+识别模型默认使用`drink_dataset_v1.0/test_images/001.jpeg`作为测试输入图片,在**2.2 准备环境**中会下载好。
+
+### 2.2 准备环境
+
+
+- 安装PaddlePaddle:如果您已经安装了2.2或者以上版本的paddlepaddle,那么无需运行下面的命令安装paddlepaddle。
+  ```shell
+  # 需要安装2.2及以上版本的Paddle
+  # 安装GPU版本的Paddle
+  python3.7 -m pip install paddlepaddle-gpu==2.2.0
+  # 安装CPU版本的Paddle
+  python3.7 -m pip install paddlepaddle==2.2.0
+  ```
+
+- 安装依赖
+  ```shell
+  python3.7 -m pip install -r requirements.txt
+  ```
+
+- 安装TensorRT
+  编译 serving-server 的脚本内会设置 `TENSORRT_LIBRARY_PATH` 这一环境变量,因此编译前需要安装TensorRT。
+
+  如果使用`registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82`镜像进行测试,则已自带TensorRT无需安装,
+  否则可以参考 [3.2 安装TensorRT](install.md#32-安装tensorrt) 进行安装,并将 [build_server.sh](../../deploy/paddleserving/build_server.sh#L62) 中的 `TENSORRT_LIBRARY_PATH` 修改为安装后的路径。
+
+- 安装 PaddleServing 相关组件,包括serving_client、serving-app,自动编译并安装带自定义OP的 serving_server 包,以及自动下载并解压推理模型
+  ```bash
+  # 安装必要依赖包
+  python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+  python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+  # 安装编译自定义OP的serving-server包
+  pushd ./deploy/paddleserving
+  source build_server.sh python3.7
+  popd
+
+  # 测试PP-ShiTu识别模型时需安装faiss包
+  python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+  # 下载模型与数据
+  bash test_tipc/prepare.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
+  ```
+
+### 2.3 功能测试
+
+测试方法如下所示,希望测试不同的模型文件,只需更换为自己的参数配置文件,即可完成对应模型的测试。
+
+```bash
+bash test_tipc/test_serving_infer_cpp.sh ${your_params_file} ${mode}
+```
+
+以`PPLCNet_x1_0`的`Linux GPU/CPU C++ 服务化部署测试`为例,命令如下所示。
+
+
+```bash
+bash test_tipc/test_serving_infer_cpp.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt serving_infer
+```
+
+输出结果如下,表示命令运行成功。
+
+```
+Run successfully with command - PPLCNet_x1_0 - python3.7 test_cpp_serving_client.py > ../../test_tipc/output/PPLCNet_x1_0/server_infer_cpp_gpu_pipeline_batchsize_1.log 2>&1 !
+Run successfully with command - PPLCNet_x1_0 - python3.7 test_cpp_serving_client.py > ../../test_tipc/output/PPLCNet_x1_0/server_infer_cpp_cpu_pipeline_batchsize_1.log 2>&1 !
+```
+
+预测结果会自动保存在 `./test_tipc/output/PPLCNet_x1_0/server_infer_cpp_gpu_pipeline_batchsize_1.log`,可以看到 PaddleServing 的运行结果:
+
+```
+WARNING: Logging before InitGoogleLogging() is written to STDERR
+I0612 09:55:16.109890 38303 naming_service_thread.cpp:202] brpc::policy::ListNamingService("127.0.0.1:9292"): added 1
+I0612 09:55:16.172924 38303 general_model.cpp:490] [client]logid=0,client_cost=60.772ms,server_cost=57.6ms.
+prediction: daisy, probability: 0.9099399447441101
+0.06275796890258789
+```
+
+
+如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。
diff --git a/test_tipc/docs/test_serving_infer_python.md b/test_tipc/docs/test_serving_infer_python.md
new file mode 100644
index 0000000000000000000000000000000000000000..9bd9dc4c2d65f2d55a0335d012fdef37b7097f54
--- /dev/null
+++ b/test_tipc/docs/test_serving_infer_python.md
@@ -0,0 +1,110 @@
+# Linux GPU/CPU PYTHON 服务化部署测试
+
+Linux GPU/CPU PYTHON 服务化部署测试的主程序为`test_serving_infer_python.sh`,可以测试基于Python的模型服务化部署功能。
+
+
+## 1. 测试结论汇总
+
+- 推理相关:
+
+| 算法名称 | 模型名称 | device_CPU | device_GPU |
+| :-------------: | :------------------------------------------: | :--------: | :--------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0_KL | 支持 | 支持 |
+| MobileNetV3 | MobileNetV3_large_x1_0_PACT | 支持 | 支持 |
+| PP-ShiTu | PPShiTu_general_rec、PPShiTu_mainbody_det | 支持 | 支持 |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_KL | 支持 | 支持 |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5_PACT | 支持 | 支持 |
+| PPHGNet | PPHGNet_small | 支持 | 支持 |
+| PPHGNet | PPHGNet_small_KL | 支持 | 支持 |
+| PPHGNet | PPHGNet_small_PACT | 支持 | 支持 |
+| PPHGNet | PPHGNet_tiny | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0_KL | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_0_PACT | 支持 | 支持 |
+| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 |
+| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 |
+| PPLCNetV2 | PPLCNetV2_base_KL | 支持 | 支持 |
+| ResNet | ResNet50 | 支持 | 支持 |
+| ResNet | ResNet50_vd | 支持 | 支持 |
+| ResNet | ResNet50_vd_KL | 支持 | 支持 |
+| ResNet | ResNet50_vd_PACT | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_KL | 支持 | 支持 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224_PACT | 支持 | 支持 |
+
+
+## 2. 测试流程
+
+### 2.1 准备数据
+
+分类模型默认使用`./deploy/paddleserving/daisy.jpg`作为测试输入图片,无需下载。
+识别模型默认使用`drink_dataset_v1.0/test_images/001.jpeg`作为测试输入图片,在**2.2 准备环境**中会下载好。
+
+### 2.2 准备环境
+
+
+- 安装PaddlePaddle:如果您已经安装了2.2或者以上版本的paddlepaddle,那么无需运行下面的命令安装paddlepaddle。
+  ```shell
+  # 需要安装2.2及以上版本的Paddle
+  # 安装GPU版本的Paddle
+  python3.7 -m pip install paddlepaddle-gpu==2.2.0
+  # 安装CPU版本的Paddle
+  python3.7 -m pip install paddlepaddle==2.2.0
+  ```
+
+- 安装依赖
+  ```shell
+  python3.7 -m pip install -r requirements.txt
+  ```
+
+- 安装 PaddleServing 相关组件,包括serving-server、serving_client、serving-app,自动下载并解压推理模型
+  ```bash
+  # 安装必要依赖包
+  python3.7 -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+  python3.7 -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+  python3.7 -m pip install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+  # 测试PP-ShiTu识别模型时需安装faiss包
+  python3.7 -m pip install faiss-cpu==1.7.1post2 -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+  # 下载模型与数据
+  bash test_tipc/prepare.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
+  ```
+
+### 2.3 功能测试
+
+测试方法如下所示,希望测试不同的模型文件,只需更换为自己的参数配置文件,即可完成对应模型的测试。
+
+```bash
+bash test_tipc/test_serving_infer_python.sh ${your_params_file} ${mode}
+```
+
+以`ResNet50`的`Linux GPU/CPU PYTHON 服务化部署测试`为例,命令如下所示。
+
+
+```bash
+bash test_tipc/test_serving_infer_python.sh test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt serving_infer
+```
+
+输出结果如下,表示命令运行成功。
+
+```
+Run successfully with command - python3.7 pipeline_http_client.py > ../../test_tipc/output/ResNet50/server_infer_gpu_pipeline_http_batchsize_1.log 2>&1!
+Run successfully with command - python3.7 pipeline_http_client.py > ../../test_tipc/output/ResNet50/server_infer_cpu_pipeline_http_batchsize_1.log 2>&1 ! 
+``` + +预测结果会自动保存在 `./test_tipc/output/ResNet50/server_infer_gpu_pipeline_http_batchsize_1.log` ,可以看到 PaddleServing 的运行结果: + +``` +{'err_no': 0, 'err_msg': '', 'key': ['label', 'prob'], 'value': ["['daisy']", '[0.998314619064331]']} +``` + + +如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。 diff --git a/test_tipc/docs/test_train_amp_inference_python.md b/test_tipc/docs/test_train_amp_inference_python.md new file mode 100644 index 0000000000000000000000000000000000000000..a6a5897d3f1afeab333ad40d7b8b8f926b92c68e --- /dev/null +++ b/test_tipc/docs/test_train_amp_inference_python.md @@ -0,0 +1,115 @@ +# Linux GPU/CPU 混合精度训练推理测试 + +Linux GPU/CPU 混合精度训练推理测试的主程序为`test_train_inference_python.sh`,可以测试基于Python的模型混合精度(默认O2)训练、评估、推理等基本功能。 + +## 1. 测试结论汇总 + +- 训练相关: + +| 算法名称 | 模型名称 | 单机单卡 | 单机多卡 | +| :-------------: | :-------------------------------------: | :----------: | :----------: | +| MobileNetV3 | MobileNetV3_large_x1_0 | 混合精度训练 | 混合精度训练 | +| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | 混合精度训练 | 混合精度训练 | +| PPHGNet | PPHGNet_small | 混合精度训练 | 混合精度训练 | +| PPHGNet | PPHGNet_tiny | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x0_25 | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x0_35 | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x0_5 | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x0_75 | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x1_0 | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x1_5 | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x2_0 | 混合精度训练 | 混合精度训练 | +| PPLCNet | PPLCNet_x2_5 | 混合精度训练 | 混合精度训练 | +| PPLCNetV2 | PPLCNetV2_base | 混合精度训练 | 混合精度训练 | +| ResNet | ResNet50 | 混合精度训练 | 混合精度训练 | +| ResNet | ResNet50_vd | 混合精度训练 | 混合精度训练 | +| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 混合精度训练 | 混合精度训练 | + +- 推理相关: + +| 算法名称 | 模型名称 | device_CPU | device_GPU | batchsize | +| :-------------: | :-------------------------------------: | :--------: | :--------: | :-------: | +| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 | 1 | +| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | 支持 | 支持 | 1 | +| PPHGNet | PPHGNet_small | 支持 | 支持 | 1 | +| PPHGNet | PPHGNet_tiny | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 | 1 | +| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 | 1 | +| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 | 1 | +| ResNet | ResNet50 | 支持 | 支持 | 1 | +| ResNet | ResNet50_vd | 支持 | 支持 | 1 | +| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 | 1 | + +## 2. 
测试流程 + +以下测试流程以 PPLCNet_x1_0 模型为例。 + +### 2.1 准备环境 + +- 安装PaddlePaddle:如果您已经安装了2.2或者以上版本的paddlepaddle,那么无需运行下面的命令安装paddlepaddle。 + ```bash + # 需要安装2.2及以上版本的Paddle + # 安装GPU版本的Paddle + python3.7 -m pip install paddlepaddle-gpu==2.2.0 + # 安装CPU版本的Paddle + python3.7 -m pip install paddlepaddle==2.2.0 + ``` + +- 安装PaddleSlim + ```bash + python3.7 -m pip install paddleslim==2.2.0 + ``` + +- 安装依赖 + ```bash + python3.7 -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple + ``` + +- 安装AutoLog(规范化日志输出工具) + ```bash + python3.7 -m pip install https://paddleocr.bj.bcebos.com/libs/auto_log-1.2.0-py3-none-any.whl + ``` + +### 2.2 准备数据和模型 + +```bash +bash test_tipc/prepare.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt lite_train_lite_infer +``` +### 2.3 功能测试 + +测试方法如下所示,希望测试不同的模型文件,只需更换为自己的参数配置文件,即可完成对应模型的测试。 + +```bash +bash test_tipc/test_train_inference_python.sh ${your_params_file} lite_train_lite_infer +``` + +以`PPLCNet_x1_0`的`Linux GPU/CPU 混合精度训练推理测试`为例,命令如下所示。 + +```bash +bash test_tipc/test_train_inference_python.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt lite_train_lite_infer +``` + +输出结果如下,表示命令运行成功。 + +```log +Run successfully with command - PPLCNet_x1_0 - python3.7 tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False -o Global.device=gpu -o Global.output_dir=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0_autocast_null -o Global.epochs=2 -o DataLoader.Train.sampler.batch_size=8 ! +Run successfully with command - PPLCNet_x1_0 - python3.7 tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.pretrained_model=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0_autocast_null/PPLCNet_x1_0/latest -o Global.device=gpu ! +Run successfully with command - PPLCNet_x1_0 - python3.7 tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.pretrained_model=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0_autocast_null/PPLCNet_x1_0/latest -o Global.save_inference_dir=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0_autocast_null! +Run successfully with command - PPLCNet_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=True -o Global.use_tensorrt=False -o Global.use_fp16=False -o Global.inference_model_dir=.././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0_autocast_null -o Global.batch_size=1 -o Global.infer_imgs=../dataset/ILSVRC2012/val -o Global.benchmark=False > .././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/infer_gpu_usetrt_False_precision_False_batchsize_1.log 2>&1 ! 
+Run successfully with command - PPLCNet_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=False -o Global.enable_mkldnn=False -o Global.cpu_num_threads=6 -o Global.inference_model_dir=.././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0_autocast_null -o Global.batch_size=1 -o Global.infer_imgs=../dataset/ILSVRC2012/val -o Global.benchmark=False > .././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/infer_cpu_usemkldnn_False_threads_6_batchsize_1.log 2>&1 ! +Run successfully with command - PPLCNet_x1_0 - python3.7 -m paddle.distributed.launch --gpus=0,1 tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o AMP.scale_loss=65536 -o AMP.use_dynamic_loss_scaling=True -o AMP.level=O2 -o Optimizer.multi_precision=True -o Global.eval_during_train=False -o Global.device=gpu -o Global.output_dir=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0,1_autocast_null -o Global.epochs=2 -o DataLoader.Train.sampler.batch_size=8 ! +Run successfully with command - PPLCNet_x1_0 - python3.7 tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.pretrained_model=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0,1_autocast_null/PPLCNet_x1_0/latest -o Global.device=gpu ! +Run successfully with command - PPLCNet_x1_0 - python3.7 tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.pretrained_model=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0,1_autocast_null/PPLCNet_x1_0/latest -o Global.save_inference_dir=./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0,1_autocast_null! +Run successfully with command - PPLCNet_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=True -o Global.use_tensorrt=False -o Global.use_fp16=False -o Global.inference_model_dir=.././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0,1_autocast_null -o Global.batch_size=1 -o Global.infer_imgs=../dataset/ILSVRC2012/val -o Global.benchmark=False > .././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/infer_gpu_usetrt_False_precision_False_batchsize_1.log 2>&1 ! +Run successfully with command - PPLCNet_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=False -o Global.enable_mkldnn=False -o Global.cpu_num_threads=6 -o Global.inference_model_dir=.././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/amp_train_gpus_0,1_autocast_null -o Global.batch_size=1 -o Global.infer_imgs=../dataset/ILSVRC2012/val -o Global.benchmark=False > .././test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/infer_cpu_usemkldnn_False_threads_6_batchsize_1.log 2>&1 ! +``` + +该信息可以在运行log中查看,以`PPLCNet_x1_0`为例,log位置在`./test_tipc/output/PPLCNet_x1_0/lite_train_lite_infer/results_python.log`。 + +如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。 diff --git a/test_tipc/docs/test_train_fleet_inference_python.md b/test_tipc/docs/test_train_fleet_inference_python.md new file mode 100644 index 0000000000000000000000000000000000000000..272a845a153313f88288249a312c159a9ef7329a --- /dev/null +++ b/test_tipc/docs/test_train_fleet_inference_python.md @@ -0,0 +1,144 @@ +# Linux GPU/CPU 多机多卡训练推理测试 + +Linux GPU/CPU 多机多卡训练推理测试的主程序为`test_train_inference_python.sh`,可以测试基于Python的多机多卡模型训练、评估、推理等基本功能。 + +## 1. 
测试结论汇总
+
+- 训练相关:
+
+  | 算法名称 | 模型名称 | 多机多卡 |
+  | :-------------: | :-------------------------------------: | :------: |
+  | MobileNetV3 | MobileNetV3_large_x1_0 | 支持 |
+  | PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | 支持 |
+  | PPHGNet | PPHGNet_small | 支持 |
+  | PPHGNet | PPHGNet_tiny | 支持 |
+  | PPLCNet | PPLCNet_x0_25 | 支持 |
+  | PPLCNet | PPLCNet_x0_35 | 支持 |
+  | PPLCNet | PPLCNet_x0_5 | 支持 |
+  | PPLCNet | PPLCNet_x0_75 | 支持 |
+  | PPLCNet | PPLCNet_x1_0 | 支持 |
+  | PPLCNet | PPLCNet_x1_5 | 支持 |
+  | PPLCNet | PPLCNet_x2_0 | 支持 |
+  | PPLCNet | PPLCNet_x2_5 | 支持 |
+  | PPLCNetV2 | PPLCNetV2_base | 支持 |
+  | ResNet | ResNet50 | 支持 |
+  | ResNet | ResNet50_vd | 支持 |
+  | SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 |
+
+- 推理相关:
+
+  | 算法名称 | 模型名称 | device_CPU | device_GPU | batchsize |
+  | :-------------: | :-------------------------------------: | :--------: | :--------: | :-------: |
+  | MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 | 1 |
+  | PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | 支持 | 支持 | 1 |
+  | PPHGNet | PPHGNet_small | 支持 | 支持 | 1 |
+  | PPHGNet | PPHGNet_tiny | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x0_25 | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x0_35 | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x0_5 | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x0_75 | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x1_0 | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x1_5 | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x2_0 | 支持 | 支持 | 1 |
+  | PPLCNet | PPLCNet_x2_5 | 支持 | 支持 | 1 |
+  | PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 | 1 |
+  | ResNet | ResNet50 | 支持 | 支持 | 1 |
+  | ResNet | ResNet50_vd | 支持 | 支持 | 1 |
+  | SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 | 1 |
+
+
+## 2. 测试流程
+
+运行环境配置请参考[文档](./install.md)的内容配置TIPC的运行环境。
+
+**下面以 PPLCNet_x1_0 模型为例,介绍测试流程**
+
+### 2.1 功能测试
+
+#### 2.1.1 修改配置文件
+
+首先,修改配置文件`test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt`中的`gpu_list`设置:假设两台机器的`ip`地址分别为`192.168.0.1`和`192.168.0.2`,则对应的配置文件`gpu_list`字段需要修改为`gpu_list:192.168.0.1,192.168.0.2;0,1`。
+
+**`ip`地址查看命令为`ifconfig`,`inet addr:`字段后的内容即为ip地址**。
+
+
+#### 2.1.2 准备数据
+
+运行`prepare.sh`准备数据和模型,数据准备命令如下所示。
+
+```shell
+bash test_tipc/prepare.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt lite_train_lite_infer
+```
+
+**注意:** 由于是多机训练,这里需要在所有节点上都运行一次上述命令来准备数据。
+
+#### 2.1.3 修改起始端口开始测试
+
+在多机的节点上使用下面的命令设置分布式的起始端口(否则后面运行的时候会由于无法找到运行端口而hang住),一般建议设置在`10000~20000`之间。
+
+```shell
+export FLAGS_START_PORT=17000
+```
+**注意:** 上述修改起始端口命令同样需要在所有节点上都执行一次。
+
+接下来就可以开始执行测试,命令如下所示。
+```shell
+bash test_tipc/test_train_inference_python.sh test_tipc/configs/PPLCNet/PPLCNet_x1_0_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
+```
+
+**注意:** 由于是多机训练,这里需要在所有节点上均运行上述命令进行测试。
+
+
+#### 2.1.4 输出结果
+
+输出结果保存在`test_tipc/output/PPLCNet_x1_0/results_python.log`,内容如下,以`Run successfully`开头表示测试命令正常,否则为测试失败。
+
+```bash
+Run successfully with command - python3.7 -m paddle.distributed.launch --ips=192.168.0.1,192.168.0.2 --gpus=0,1 tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.device=gpu -o Global.output_dir=./test_tipc/output/PPLCNet_x1_0/norm_train_gpus_0,1_autocast_null_nodes_2 -o Global.epochs=2 -o DataLoader.Train.sampler.batch_size=8 !
+...
+... 
+Run successfully with command - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=False -o Global.enable_mkldnn=True -o Global.cpu_num_threads=1 -o Global.inference_model_dir=.././test_tipc/output/PPLCNet_x1_0/norm_train_gpus_0,1_autocast_null_nodes_2 -o Global.batch_size=16 -o Global.infer_imgs=../dataset/ILSVRC2012/val -o Global.benchmark=True > .././test_tipc/output/PPLCNet_x1_0/infer_cpu_usemkldnn_True_threads_1_batchsize_16.log 2>&1 !
+```
+
+可以在配置文件中设置`-o Global.benchmark:True`,表示开启benchmark选项,此时就能得到测试的详细数据,包含运行环境信息(系统版本、CUDA版本、CUDNN版本、驱动版本),Paddle版本信息,参数设置信息(运行设备、线程数、是否开启内存优化等),模型信息(模型名称、精度),数据信息(batchsize、是否为动态shape等),性能信息(CPU,GPU的占用、运行耗时、预处理耗时、推理耗时、后处理耗时),内容如下所示:
+
+```log
+[2022/06/07 17:01:41] root INFO: ---------------------- Env info ----------------------
+[2022/06/07 17:01:41] root INFO: OS_version: CentOS 6.10
+[2022/06/07 17:01:41] root INFO: CUDA_version: 10.1.243
+[2022/06/07 17:01:41] root INFO: CUDNN_version: None.None.None
+[2022/06/07 17:01:41] root INFO: drivier_version: 460.32.03
+[2022/06/07 17:01:41] root INFO: ---------------------- Paddle info ----------------------
+[2022/06/07 17:01:41] root INFO: paddle_version: 2.3.0-rc0
+[2022/06/07 17:01:41] root INFO: paddle_version: 2.3.0-rc0
+[2022/06/07 17:01:41] root INFO: paddle_commit: 5d4980c052583fec022812d9c29460aff7cdc18b
+[2022/06/07 17:01:41] root INFO: log_api_version: 1.0
+[2022/06/07 17:01:41] root INFO: ----------------------- Conf info -----------------------
+[2022/06/07 17:01:41] root INFO: runtime_device: cpu
+[2022/06/07 17:01:41] root INFO: ir_optim: True
+[2022/06/07 17:01:41] root INFO: enable_memory_optim: True
+[2022/06/07 17:01:41] root INFO: enable_tensorrt: False
+[2022/06/07 17:01:41] root INFO: enable_mkldnn: False
+[2022/06/07 17:01:41] root INFO: cpu_math_library_num_threads: 6
+[2022/06/07 17:01:41] root INFO: ----------------------- Model info ----------------------
+[2022/06/07 17:01:41] root INFO: model_name: cls
+[2022/06/07 17:01:41] root INFO: precision: fp32
+[2022/06/07 17:01:41] root INFO: ----------------------- Data info -----------------------
+[2022/06/07 17:01:41] root INFO: batch_size: 16
+[2022/06/07 17:01:41] root INFO: input_shape: [3, 224, 224]
+[2022/06/07 17:01:41] root INFO: data_num: 3
+[2022/06/07 17:01:41] root INFO: ----------------------- Perf info -----------------------
+[2022/06/07 17:01:41] root INFO: cpu_rss(MB): 726.5586, gpu_rss(MB): None, gpu_util: None%
+[2022/06/07 17:01:41] root INFO: total time spent(s): 0.3527
+[2022/06/07 17:01:41] root INFO: preprocess_time(ms): 33.2723, inference_time(ms): 317.9824, postprocess_time(ms): 1.4579
+```
+
+该信息可以在运行log中查看,log位置在`test_tipc/output/PPLCNet_x1_0/infer_gpu_usetrt_True_precision_True_batchsize_1.log`。
+
+如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。
+
+**注意:** 分布式训练时仅在`trainer_id=0`所在的节点中保存模型,因此其他节点在运行模型导出与推理时会因为找不到保存的模型而报错,这是正常现象。
diff --git a/test_tipc/docs/test_train_inference_python.md b/test_tipc/docs/test_train_inference_python.md
index ef25e14513044b65cc8573e3a3371f93f704c69f..fb808e2af94cc5ada2f2f25cbb81922a21876347 100644
--- a/test_tipc/docs/test_train_inference_python.md
+++ b/test_tipc/docs/test_train_inference_python.md
@@ -58,36 +58,36 @@ Linux端基础训练预测功能测试的主程序为`test_train_inference_pytho
 
 - 模式1:lite_train_lite_infer,使用少量数据训练,用于快速验证训练到预测的走通流程,不验证精度和速度;
 ```shell
-bash test_tipc/prepare.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_lite_infer'
-bash test_tipc/test_train_inference_python.sh 
./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_lite_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_lite_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_lite_infer' ``` - 模式2:lite_train_whole_infer,使用少量数据训练,一定量数据预测,用于验证训练后的模型执行预测,预测速度是否合理; ```shell -bash test_tipc/prepare.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_whole_infer' -bash test_tipc/test_train_inference_python.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'lite_train_whole_infer' ``` - 模式3:whole_infer,不训练,全量数据预测,走通开源模型评估、动转静,检查inference model预测时间和精度; ```shell -bash test_tipc/prepare.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'whole_infer' # 用法1: -bash test_tipc/test_train_inference_python.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'whole_infer' # 用法2: 指定GPU卡预测,第三个传入参数为GPU卡号 -bash test_tipc/test_train_inference_python.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'whole_infer' '1' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'whole_infer' '1' ``` - 模式4:whole_train_whole_infer,CE: 全量数据训练,全量数据预测,验证模型训练精度,预测精度,预测速度; ```shell -bash test_tipc/prepare.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'whole_train_whole_infer' -bash test_tipc/test_train_inference_python.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'whole_train_whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'whole_train_whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'whole_train_whole_infer' ``` - 模式5: klquant_whole_infer,测试离线量化; ```shell -bash test_tipc/prepare.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'klquant_whole_infer' -bash test_tipc/test_train_inference_python.sh ./test_tipc/config/ResNet/ResNet50_vd_train_infer_python.txt 'klquant_whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'klquant_whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ResNet/ResNet50_vd_train_infer_python.txt 'klquant_whole_infer' ``` diff --git a/test_tipc/docs/test_train_pact_inference_python.md b/test_tipc/docs/test_train_pact_inference_python.md new file mode 100644 index 0000000000000000000000000000000000000000..6aeecad78a27172ea54aa2bbe318f68a5d0ee188 --- /dev/null +++ b/test_tipc/docs/test_train_pact_inference_python.md @@ -0,0 +1,106 @@ +# Linux GPU/CPU PACT量化训练推理测试 + +Linux GPU/CPU PACT量化训练推理测试的主程序为`test_train_inference_python.sh`,可以测试基于Python的模型PACT在线量化等基本功能。 + +## 1. 
测试结论汇总
+
+- 训练相关:
+
+| 算法名称 | 模型名称 | 单机单卡 |
+| :-------------: | :-------------------------------------: | :----------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | PACT量化训练 |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | PACT量化训练 |
+| PPHGNet | PPHGNet_small | PACT量化训练 |
+| PPHGNet | PPHGNet_tiny | PACT量化训练 |
+| PPLCNet | PPLCNet_x0_25 | PACT量化训练 |
+| PPLCNet | PPLCNet_x0_35 | PACT量化训练 |
+| PPLCNet | PPLCNet_x0_5 | PACT量化训练 |
+| PPLCNet | PPLCNet_x0_75 | PACT量化训练 |
+| PPLCNet | PPLCNet_x1_0 | PACT量化训练 |
+| PPLCNet | PPLCNet_x1_5 | PACT量化训练 |
+| PPLCNet | PPLCNet_x2_0 | PACT量化训练 |
+| PPLCNet | PPLCNet_x2_5 | PACT量化训练 |
+| PPLCNetV2 | PPLCNetV2_base | PACT量化训练 |
+| ResNet | ResNet50 | PACT量化训练 |
+| ResNet | ResNet50_vd | PACT量化训练 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | PACT量化训练 |
+
+- 推理相关:
+
+| 算法名称 | 模型名称 | device_CPU | device_GPU | batchsize |
+| :-------------: | :-------------------------------------: | :--------: | :--------: | :-------: |
+| MobileNetV3 | MobileNetV3_large_x1_0 | 支持 | 支持 | 1 |
+| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | 支持 | 支持 | 1 |
+| PPHGNet | PPHGNet_small | 支持 | 支持 | 1 |
+| PPHGNet | PPHGNet_tiny | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x0_25 | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x0_35 | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x0_5 | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x0_75 | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x1_0 | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x1_5 | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x2_0 | 支持 | 支持 | 1 |
+| PPLCNet | PPLCNet_x2_5 | 支持 | 支持 | 1 |
+| PPLCNetV2 | PPLCNetV2_base | 支持 | 支持 | 1 |
+| ResNet | ResNet50 | 支持 | 支持 | 1 |
+| ResNet | ResNet50_vd | 支持 | 支持 | 1 |
+| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | 支持 | 支持 | 1 |
+
+## 2. 测试流程
+
+以下测试流程以 MobileNetV3_large_x1_0 模型为例。
+
+### 2.1 准备环境
+
+- 安装PaddlePaddle:如果您已经安装了2.2或者以上版本的paddlepaddle,那么无需运行下面的命令安装paddlepaddle。
+  ```bash
+  # 需要安装2.2及以上版本的Paddle
+  # 安装GPU版本的Paddle
+  python3.7 -m pip install paddlepaddle-gpu==2.2.0
+  # 安装CPU版本的Paddle
+  python3.7 -m pip install paddlepaddle==2.2.0
+  ```
+
+- 安装PaddleSlim
+  ```bash
+  python3.7 -m pip install paddleslim==2.2.0
+  ```
+
+- 安装依赖
+  ```bash
+  python3.7 -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+  ```
+
+- 安装AutoLog(规范化日志输出工具)
+  ```bash
+  python3.7 -m pip install https://paddleocr.bj.bcebos.com/libs/auto_log-1.2.0-py3-none-any.whl
+  ```
+
+### 2.2 准备数据和模型
+
+```bash
+bash test_tipc/prepare.sh test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_pact_infer_python.txt lite_train_lite_infer
+```
+
+在线量化的操作流程,可参考[文档](../../deploy/slim/README.md)。
+
+### 2.3 功能测试
+
+以`MobileNetV3_large_x1_0`的`Linux GPU/CPU PACT在线量化训练推理测试`为例,命令如下所示。
+
+```bash
+bash test_tipc/test_train_inference_python.sh test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_pact_infer_python.txt lite_train_lite_infer
+```
+
+输出结果如下,表示命令运行成功。
+
+```log
+Run successfully with command - MobileNetV3_large_x1_0 - python3.7 tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Slim.quant.name=pact -o Optimizer.lr.learning_rate=0.01 -o Global.device=gpu -o Global.output_dir=./test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/pact_train_gpus_0_autocast_null -o Global.epochs=2 -o DataLoader.Train.sampler.batch_size=8 ! 
+Run successfully with command - MobileNetV3_large_x1_0 - python3.7 tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Slim.quant.name=pact -o Global.pretrained_model=./test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/pact_train_gpus_0_autocast_null/MobileNetV3_large_x1_0/latest -o Global.device=gpu ! +Run successfully with command - MobileNetV3_large_x1_0 - python3.7 tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Slim.quant.name=pact -o Global.pretrained_model=./test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/pact_train_gpus_0_autocast_null/MobileNetV3_large_x1_0/latest -o Global.save_inference_dir=./test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/pact_train_gpus_0_autocast_null! +Run successfully with command - MobileNetV3_large_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=True -o Global.use_tensorrt=False -o Global.use_fp16=False -o Global.inference_model_dir=.././test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/pact_train_gpus_0_autocast_null -o Global.batch_size=1 -o Global.infer_imgs=../dataset/ILSVRC2012/val -o Global.benchmark=True > .././test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/infer_gpu_usetrt_False_precision_False_batchsize_1.log 2>&1 ! +Run successfully with command - MobileNetV3_large_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=False -o Global.enable_mkldnn=False -o Global.cpu_num_threads=1 -o Global.inference_model_dir=.././test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/pact_train_gpus_0_autocast_null -o Global.batch_size=1 -o Global.infer_imgs=../dataset/ILSVRC2012/val -o Global.benchmark=True > .././test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer/infer_cpu_usemkldnn_False_threads_1_batchsize_1.log 2>&1 ! +``` +同时,测试过程中的日志保存在`PaddleClas/test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer`下。 + +如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。 diff --git a/test_tipc/docs/test_train_ptq_inference_python.md b/test_tipc/docs/test_train_ptq_inference_python.md new file mode 100644 index 0000000000000000000000000000000000000000..29d5b9f59b31d96dd5fe2b325853fee911a87c97 --- /dev/null +++ b/test_tipc/docs/test_train_ptq_inference_python.md @@ -0,0 +1,105 @@ +# Linux GPU/CPU KL离线量化推理测试 + +Linux GPU/CPU KL离线量化推理测试的主程序为`test_ptq_inference_python.sh`,可以测试基于Python的模型KL离线量化推理等基本功能。 + +## 1. 
测试结论汇总 + +- KL离线量化: + +| 算法名称 | 模型名称 | CPU | +| :-------------: | :-------------------------------------: | :----------: | +| MobileNetV3 | MobileNetV3_large_x1_0 | KL离线量化 | +| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | KL离线量化 | +| PPHGNet | PPHGNet_small | KL离线量化 | +| PPHGNet | PPHGNet_tiny | KL离线量化 | +| PPLCNet | PPLCNet_x0_25 | KL离线量化 | +| PPLCNet | PPLCNet_x0_35 | KL离线量化 | +| PPLCNet | PPLCNet_x0_5 | KL离线量化 | +| PPLCNet | PPLCNet_x0_75 | KL离线量化 | +| PPLCNet | PPLCNet_x1_0 | KL离线量化 | +| PPLCNet | PPLCNet_x1_5 | KL离线量化 | +| PPLCNet | PPLCNet_x2_0 | KL离线量化 | +| PPLCNet | PPLCNet_x2_5 | KL离线量化 | +| PPLCNetV2 | PPLCNetV2_base | KL离线量化 | +| ResNet | ResNet50 | KL离线量化 | +| ResNet | ResNet50_vd | KL离线量化 | +| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | KL离线量化 | + +- 推理相关: + +| 算法名称 | 模型名称 | CPU | +| :-------------: | :-------------------------------------: | :----------: | +| MobileNetV3 | MobileNetV3_large_x1_0 | KL离线量化 | +| PP-ShiTu | GeneralRecognition_PPLCNet_x2_5 | KL离线量化 | +| PPHGNet | PPHGNet_small | KL离线量化 | +| PPHGNet | PPHGNet_tiny | KL离线量化 | +| PPLCNet | PPLCNet_x0_25 | KL离线量化 | +| PPLCNet | PPLCNet_x0_35 | KL离线量化 | +| PPLCNet | PPLCNet_x0_5 | KL离线量化 | +| PPLCNet | PPLCNet_x0_75 | KL离线量化 | +| PPLCNet | PPLCNet_x1_0 | KL离线量化 | +| PPLCNet | PPLCNet_x1_5 | KL离线量化 | +| PPLCNet | PPLCNet_x2_0 | KL离线量化 | +| PPLCNet | PPLCNet_x2_5 | KL离线量化 | +| PPLCNetV2 | PPLCNetV2_base | KL离线量化 | +| ResNet | ResNet50 | KL离线量化 | +| ResNet | ResNet50_vd | KL离线量化 | +| SwinTransformer | SwinTransformer_tiny_patch4_window7_224 | KL离线量化 | + + +## 2. 测试流程 + +以下测试流程以 MobileNetV3_large_x1_0 模型为例。 + +### 2.1 准备环境 + +- 安装PaddlePaddle:如果您已经安装了2.2或者以上版本的paddlepaddle,那么无需运行下面的命令安装paddlepaddle。 + ```bash + # 需要安装2.2及以上版本的Paddle + # 安装GPU版本的Paddle + python3.7 -m pip install paddlepaddle-gpu==2.2.0 + # 安装CPU版本的Paddle + python3.7 -m pip install paddlepaddle==2.2.0 + ``` + +- 安装PaddleSlim + ```bash + python3.7 -m pip install paddleslim==2.2.0 + ``` + +- 安装依赖 + ```bash + python3.7 -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple + ``` + +- 安装AutoLog(规范化日志输出工具) + ```bash + python3.7 -m pip install https://paddleocr.bj.bcebos.com/libs/auto_log-1.2.0-py3-none-any.whl + ``` + +### 2.2 准备数据和模型 + +```bash +bash test_tipc/prepare.sh test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt whole_infer +``` + +离线量化的操作流程,可参考[文档](../../deploy/slim/README.md)。 + +### 2.3 功能测试 + +以`MobileNetV3_large_x1_0`的`Linux GPU/CPU KL离线量化训练推理测试`为例,命令如下所示。 + +```bash +bash test_tipc/test_ptq_inference_python.sh test_tipc/configs/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt whole_infer +``` + +输出结果如下,表示命令运行成功。 + +```log +Run successfully with command - MobileNetV3_large_x1_0 - python3.7 deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./MobileNetV3_large_x1_0_infer! +Run successfully with command - MobileNetV3_large_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=True -o Global.use_tensorrt=False -o Global.use_fp16=False -o Global.inference_model_dir=.././MobileNetV3_large_x1_0_infer//quant_post_static_model -o Global.batch_size=1 -o Global.infer_imgs=../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg -o Global.benchmark=False > .././test_tipc/output/MobileNetV3_large_x1_0/whole_infer/infer_gpu_usetrt_False_precision_False_batchsize_1.log 2>&1 ! 
+Run successfully with command - MobileNetV3_large_x1_0 - python3.7 python/predict_cls.py -c configs/inference_cls.yaml -o Global.use_gpu=False -o Global.enable_mkldnn=False -o Global.cpu_num_threads=1 -o Global.inference_model_dir=.././MobileNetV3_large_x1_0_infer//quant_post_static_model -o Global.batch_size=1 -o Global.infer_imgs=../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg -o Global.benchmark=False > .././test_tipc/output/MobileNetV3_large_x1_0/whole_infer/infer_cpu_usemkldnn_False_threads_1_batchsize_1.log 2>&1 ! +``` +同时,测试过程中的日志保存在`PaddleClas/test_tipc/output/MobileNetV3_large_x1_0/whole_infer`下。 + +如果运行失败,也会在终端中输出运行失败的日志信息以及对应的运行命令。可以基于该命令,分析运行失败的原因。 diff --git a/test_tipc/generate_cpp_yaml.py b/test_tipc/generate_cpp_yaml.py index 2e541de33a47bb3a940a3d5fadc0ddf436bb50b9..fdd5ee2e2fa2c31085a88dca0479a01a944e10cb 100644 --- a/test_tipc/generate_cpp_yaml.py +++ b/test_tipc/generate_cpp_yaml.py @@ -66,6 +66,10 @@ def main(): "test_images") config["IndexProcess"]["index_dir"] = os.path.join(args.data_dir, "index") + config["IndexProcess"]["image_root"] = os.path.join(args.data_dir, + "gallery") + config["IndexProcess"]["data_file"] = os.path.join(args.data_dir, + "drink_label.txt") assert args.cls_model_dir assert args.det_model_dir config["Global"]["det_inference_model_dir"] = args.det_model_dir diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh index 70040dc8b28656f7fb3e1384f840f068437dcf7e..15adf957eb8eb25780d2053129e91eb5e98b3d33 100644 --- a/test_tipc/prepare.sh +++ b/test_tipc/prepare.sh @@ -2,8 +2,7 @@ FILENAME=$1 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', -# 'whole_infer', 'klquant_whole_infer', -# 'cpp_infer', 'serving_infer', 'lite_infer'] +# 'whole_infer', 'cpp_infer', 'serving_infer', 'lite_infer'] MODE=$2 @@ -12,7 +11,7 @@ dataline=$(cat ${FILENAME}) IFS=$'\n' lines=(${dataline}) -function func_parser_key(){ +function func_parser_key() { strs=$1 IFS=":" array=(${strs}) @@ -20,134 +19,240 @@ function func_parser_key(){ echo ${tmp} } -function func_parser_value(){ +function func_parser_value() { strs=$1 IFS=":" array=(${strs}) if [ ${#array[*]} = 2 ]; then echo ${array[1]} else - IFS="|" - tmp="${array[1]}:${array[2]}" + IFS="|" + tmp="${array[1]}:${array[2]}" echo ${tmp} fi } -function func_get_url_file_name(){ +function func_get_url_file_name() { strs=$1 IFS="/" array=(${strs}) - tmp=${array[${#array[@]}-1]} + tmp=${array[${#array[@]} - 1]} echo ${tmp} } model_name=$(func_parser_value "${lines[1]}") -if [ ${MODE} = "cpp_infer" ];then - if [[ $FILENAME == *infer_cpp_linux_gpu_cpu.txt ]];then - cpp_type=$(func_parser_value "${lines[2]}") - cls_inference_model_dir=$(func_parser_value "${lines[3]}") - det_inference_model_dir=$(func_parser_value "${lines[4]}") - cls_inference_url=$(func_parser_value "${lines[5]}") - det_inference_url=$(func_parser_value "${lines[6]}") - - if [[ $cpp_type == "cls" ]];then - eval "wget -nc $cls_inference_url" - tar xf "${model_name}_inference.tar" - eval "mv inference $cls_inference_model_dir" - cd dataset - rm -rf ILSVRC2012 - wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar - tar xf whole_chain_infer.tar - ln -s whole_chain_infer ILSVRC2012 - cd .. 
- elif [[ $cpp_type == "shitu" ]];then - eval "wget -nc $cls_inference_url" - tar_name=$(func_get_url_file_name "$cls_inference_url") - model_dir=${tar_name%.*} - eval "tar xf ${tar_name}" - eval "mv ${model_dir} ${cls_inference_model_dir}" - - eval "wget -nc $det_inference_url" - tar_name=$(func_get_url_file_name "$det_inference_url") - model_dir=${tar_name%.*} - eval "tar xf ${tar_name}" - eval "mv ${model_dir} ${det_inference_model_dir}" - cd dataset - wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar - tar -xf drink_dataset_v1.0.tar - else - echo "Wrong cpp type in config file in line 3. only support cls, shitu" - fi - exit 0 - else - echo "use wrong config file" - exit 1 - fi +if [[ ${MODE} = "cpp_infer" ]]; then + if [ -d "./deploy/cpp/opencv-3.4.7/opencv3/" ] && [ $(md5sum ./deploy/cpp/opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ]; then + echo "################### build opencv skipped ###################" + else + echo "################### build opencv ###################" + rm -rf ./deploy/cpp/opencv-3.4.7.tar.gz ./deploy/cpp/opencv-3.4.7/ + pushd ./deploy/cpp/ + wget -nc https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz + tar -xf opencv-3.4.7.tar.gz + + cd opencv-3.4.7/ + install_path=$(pwd)/opencv3 + rm -rf build + mkdir build + cd build + + cmake .. \ + -DCMAKE_INSTALL_PREFIX=${install_path} \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DWITH_IPP=OFF \ + -DBUILD_IPP_IW=OFF \ + -DWITH_LAPACK=OFF \ + -DWITH_EIGEN=OFF \ + -DCMAKE_INSTALL_LIBDIR=lib64 \ + -DWITH_ZLIB=ON \ + -DBUILD_ZLIB=ON \ + -DWITH_JPEG=ON \ + -DBUILD_JPEG=ON \ + -DWITH_PNG=ON \ + -DBUILD_PNG=ON \ + -DWITH_TIFF=ON \ + -DBUILD_TIFF=ON + + make -j + make install + cd ../../ + popd + echo "################### build opencv finished ###################" + fi + if [[ ! -d "./deploy/cpp/paddle_inference/" ]]; then + pushd ./deploy/cpp/ + PADDLEInfer=$3 + if [ "" = "$PADDLEInfer" ]; then + wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate + tar xf paddle_inference.tgz + else + wget -nc ${PADDLEInfer} --no-check-certificate + tar_name=$(func_get_url_file_name "$PADDLEInfer") + tar xf ${tar_name} + if [ ! -d "paddle_inference" ]; then + ln -s paddle_inference_install_dir paddle_inference + fi + fi + popd + fi + if [[ $FILENAME == *infer_cpp_linux_gpu_cpu.txt ]]; then + cpp_type=$(func_parser_value "${lines[2]}") + cls_inference_model_dir=$(func_parser_value "${lines[3]}") + det_inference_model_dir=$(func_parser_value "${lines[4]}") + cls_inference_url=$(func_parser_value "${lines[5]}") + det_inference_url=$(func_parser_value "${lines[6]}") + + if [[ $cpp_type == "cls" ]]; then + eval "wget -nc $cls_inference_url" + tar_name=$(func_get_url_file_name "$cls_inference_url") + model_dir=${tar_name%.*} + eval "tar xf ${tar_name}" + + # move '_int8' suffix in pact models + if [[ ${tar_name} =~ "pact_infer" ]]; then + cd ${cls_inference_model_dir} + mv inference_int8.pdiparams inference.pdiparams + mv inference_int8.pdmodel inference.pdmodel + cd .. + fi + + cd dataset + rm -rf ILSVRC2012 + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar + tar xf whole_chain_infer.tar + ln -s whole_chain_infer ILSVRC2012 + cd .. 
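+    # shitu 场景:下面的分支会同时下载识别模型、主体检测模型与 drink 示例数据集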
+ elif [[ $cpp_type == "shitu" ]]; then + eval "wget -nc $cls_inference_url" + tar_name=$(func_get_url_file_name "$cls_inference_url") + model_dir=${tar_name%.*} + eval "tar xf ${tar_name}" + + eval "wget -nc $det_inference_url" + tar_name=$(func_get_url_file_name "$det_inference_url") + model_dir=${tar_name%.*} + eval "tar xf ${tar_name}" + + cd dataset + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar + tar -xf drink_dataset_v1.0.tar + else + echo "Wrong cpp type in config file in line 3. only support cls, shitu" + fi + exit 0 + else + echo "use wrong config file" + exit 1 + fi fi model_name=$(func_parser_value "${lines[1]}") model_url_value=$(func_parser_value "${lines[35]}") model_url_key=$(func_parser_key "${lines[35]}") -if [[ $FILENAME == *GeneralRecognition* ]];then - cd dataset - rm -rf Aliproduct - rm -rf train_reg_all_data.txt - rm -rf demo_train - wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar - tar -xf tipc_shitu_demo_data.tar - ln -s tipc_shitu_demo_data Aliproduct - ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt - ln -s tipc_shitu_demo_data/demo_train demo_train - cd tipc_shitu_demo_data - ln -s demo_test.txt val_list.txt - cd ../../ - eval "wget -nc $model_url_value" - mv general_PPLCNet_x2_5_pretrained_v1.0.pdparams GeneralRecognition_PPLCNet_x2_5_pretrained.pdparams - exit 0 +if [[ $model_name == *ShiTu* ]]; then + cd dataset + rm -rf Aliproduct + rm -rf train_reg_all_data.txt + rm -rf demo_train + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar --no-check-certificate + tar -xf tipc_shitu_demo_data.tar + ln -s tipc_shitu_demo_data Aliproduct + ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt + ln -s tipc_shitu_demo_data/demo_train demo_train + cd tipc_shitu_demo_data + ln -s demo_test.txt val_list.txt + cd ../../ + eval "wget -nc $model_url_value --no-check-certificate" + mv general_PPLCNet_x2_5_pretrained_v1.0.pdparams GeneralRecognition_PPLCNet_x2_5_pretrained.pdparams + exit 0 fi -if [[ $FILENAME == *use_dali* ]];then +if [[ $FILENAME == *use_dali* ]]; then python_name=$(func_parser_value "${lines[2]}") ${python_name} -m pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/nightly --upgrade nvidia-dali-nightly-cuda102 fi -if [ ${MODE} = "lite_train_lite_infer" ] || [ ${MODE} = "lite_train_whole_infer" ];then - # pretrain lite train data - cd dataset - rm -rf ILSVRC2012 - wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_little_train.tar - tar xf whole_chain_little_train.tar - ln -s whole_chain_little_train ILSVRC2012 - cd ILSVRC2012 - mv train.txt train_list.txt - mv val.txt val_list.txt - cp -r train/* val/ - cd ../../ -elif [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ];then +if [[ ${MODE} = "lite_train_lite_infer" ]] || [[ ${MODE} = "lite_train_whole_infer" ]]; then + if [[ ${model_name} =~ "GeneralRecognition" ]]; then + cd dataset + rm -rf Aliproduct + rm -rf train_reg_all_data.txt + rm -rf demo_train + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar --no-check-certificate + tar -xf tipc_shitu_demo_data.tar + ln -s tipc_shitu_demo_data Aliproduct + ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt + ln -s tipc_shitu_demo_data/demo_train demo_train + cd tipc_shitu_demo_data + ln -s demo_test.txt val_list.txt + cd ../../ + 
else
+        # pretrain lite train data
+        cd dataset
+        rm -rf ILSVRC2012
+        wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_little_train.tar --no-check-certificate
+        tar xf whole_chain_little_train.tar
+        ln -s whole_chain_little_train ILSVRC2012
+        cd ILSVRC2012
+        mv train.txt train_list.txt
+        mv val.txt val_list.txt
+        cp -r train/* val/
+        cd ../../
+    fi
+    if [[ ${FILENAME} =~ "pact_infer" ]]; then
+        # download pretrained model for PACT training
+        pretrained_model_url=$(func_parser_value "${lines[35]}")
+        mkdir pretrained_model
+        cd pretrained_model
+        wget -nc ${pretrained_model_url} --no-check-certificate
+        cd ..
+    fi
+elif [[ ${MODE} = "whole_infer" ]]; then
     # download data
-    cd dataset
-    rm -rf ILSVRC2012
-    wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar
-    tar xf whole_chain_infer.tar
-    ln -s whole_chain_infer ILSVRC2012
-    cd ILSVRC2012
-    mv val.txt val_list.txt
-    ln -s val_list.txt train_list.txt
-    cd ../../
+    if [[ ${model_name} =~ "GeneralRecognition" ]]; then
+        cd dataset
+        rm -rf Aliproduct
+        rm -rf train_reg_all_data.txt
+        rm -rf demo_train
+        wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar --no-check-certificate
+        tar -xf tipc_shitu_demo_data.tar
+        ln -s tipc_shitu_demo_data Aliproduct
+        ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt
+        ln -s tipc_shitu_demo_data/demo_train demo_train
+        cd tipc_shitu_demo_data
+        rm -rf val_list.txt
+        ln -s demo_test.txt val_list.txt
+        cd ../../
+    else
+        cd dataset
+        rm -rf ILSVRC2012
+        wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar
+        tar xf whole_chain_infer.tar
+        ln -s whole_chain_infer ILSVRC2012
+        cd ILSVRC2012
+        mv val.txt val_list.txt
+        rm -rf train_list.txt
+        ln -s val_list.txt train_list.txt
+        cd ../../
+    fi
     # download inference or pretrained model
     eval "wget -nc $model_url_value"
-    if [[ $model_url_key == *inference* ]]; then
-        rm -rf inference
-        tar xf "${model_name}_inference.tar"
+    if [[ ${model_url_value} =~ ".tar" ]]; then
+        tar_name=$(func_get_url_file_name "${model_url_value}")
+        echo $tar_name
+        rm -rf ${tar_name}
+        tar xf ${tar_name}
     fi
-    if [[ $model_name == "SwinTransformer_large_patch4_window7_224" || $model_name == "SwinTransformer_large_patch4_window12_384" ]];then
-        cmd="mv ${model_name}_22kto1k_pretrained.pdparams ${model_name}_pretrained.pdparams"
-        eval $cmd
+    if [[ $model_name == "SwinTransformer_large_patch4_window7_224" || $model_name == "SwinTransformer_large_patch4_window12_384" ]]; then
+        cmd="mv ${model_name}_22kto1k_pretrained.pdparams ${model_name}_pretrained.pdparams"
+        eval $cmd
     fi
-elif [ ${MODE} = "whole_train_whole_infer" ];then
+elif [[ ${MODE} = "whole_train_whole_infer" ]]; then
     cd dataset
     rm -rf ILSVRC2012
     wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_CIFAR100.tar
@@ -157,33 +262,88 @@ elif [ ${MODE} = "whole_train_whole_infer" ];then
     mv train.txt train_list.txt
     mv test.txt val_list.txt
     cd ../../
+    if [[ ${FILENAME} =~ "pact_infer" ]]; then
+        # download pretrained model for PACT training
+        pretrained_model_url=$(func_parser_value "${lines[35]}")
+        mkdir pretrained_model
+        cd pretrained_model
+        wget -nc ${pretrained_model_url} --no-check-certificate
+        cd .. 
+ fi fi -if [ ${MODE} = "serving_infer" ];then +if [[ ${MODE} = "serving_infer" ]]; then # prepare serving env python_name=$(func_parser_value "${lines[2]}") - ${python_name} -m pip install install paddle-serving-server-gpu==0.6.1.post101 - ${python_name} -m pip install paddle_serving_client==0.6.1 - ${python_name} -m pip install paddle-serving-app==0.6.1 + if [[ ${model_name} = "PPShiTu" ]]; then + cls_inference_model_url=$(func_parser_value "${lines[3]}") + cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}") + det_inference_model_url=$(func_parser_value "${lines[4]}") + det_tar_name=$(func_get_url_file_name "${det_inference_model_url}") + cd ./deploy + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar --no-check-certificate + tar -xf drink_dataset_v1.0.tar + mkdir models + cd models + wget -nc ${cls_inference_model_url} && tar xf ${cls_tar_name} + wget -nc ${det_inference_model_url} && tar xf ${det_tar_name} + cd .. + else + cls_inference_model_url=$(func_parser_value "${lines[3]}") + cls_tar_name=$(func_get_url_file_name "${cls_inference_model_url}") + cd ./deploy/paddleserving + wget -nc ${cls_inference_model_url} + tar xf ${cls_tar_name} + + # move '_int8' suffix in pact models + if [[ ${cls_tar_name} =~ "pact_infer" ]]; then + cls_inference_model_dir=${cls_tar_name%%.tar} + cd ${cls_inference_model_dir} + mv inference_int8.pdiparams inference.pdiparams + mv inference_int8.pdmodel inference.pdmodel + cd .. + fi + + cd ../../ + fi unset http_proxy unset https_proxy - cd ./deploy/paddleserving - wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar fi -if [ ${MODE} = "paddle2onnx_infer" ];then +if [[ ${MODE} = "paddle2onnx_infer" ]]; then # prepare paddle2onnx env python_name=$(func_parser_value "${lines[2]}") - ${python_name} -m pip install install paddle2onnx - ${python_name} -m pip install onnxruntime + inference_model_url=$(func_parser_value "${lines[10]}") + tar_name=${inference_model_url##*/} - # wget model - cd deploy && mkdir models && cd models - wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar + ${python_name} -m pip install paddle2onnx + ${python_name} -m pip install onnxruntime + if [[ ${model_name} =~ "GeneralRecognition" ]]; then + cd dataset + rm -rf Aliproduct + rm -rf train_reg_all_data.txt + rm -rf demo_train + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar --no-check-certificate + tar -xf tipc_shitu_demo_data.tar + ln -s tipc_shitu_demo_data Aliproduct + ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt + ln -s tipc_shitu_demo_data/demo_train demo_train + cd tipc_shitu_demo_data + rm -rf val_list.txt + ln -s demo_test.txt val_list.txt + cd ../../ + eval "wget -nc $model_url_value --no-check-certificate" + mv general_PPLCNet_x2_5_pretrained_v1.0.pdparams GeneralRecognition_PPLCNet_x2_5_pretrained.pdparams + fi + cd deploy + mkdir models + cd models + wget -nc ${inference_model_url} + tar xf ${tar_name} cd ../../ fi -if [ ${MODE} = "benchmark_train" ];then +if [[ ${MODE} = "benchmark_train" ]]; then pip install -r requirements.txt cd dataset rm -rf ILSVRC2012 @@ -191,6 +351,7 @@ if [ ${MODE} = "benchmark_train" ];then tar xf ILSVRC2012_val.tar ln -s ILSVRC2012_val ILSVRC2012 cd ILSVRC2012 - ln -s val_list.txt train_list.txt + rm -rf train_list.txt + ln -s val_list.txt train_list.txt 
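+    # benchmark 场景仅需跑通训练流程并测速,这里直接以验证集列表充当训练列表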
cd ../../ fi diff --git a/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_amp_fp16_DP.sh b/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_amp_fp16_DP.sh index 8ec70d35c3aed8814e43062f70223d4a2c5fffe8..f9f2f76665cb030fbf11414d4f313345a539b12c 100644 --- a/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_amp_fp16_DP.sh +++ b/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_amp_fp16_DP.sh @@ -8,5 +8,11 @@ num_workers=8 # get data bash test_tipc/static/${model_item}/benchmark_common/prepare.sh + +cd ./dataset/ILSVRC2012 +cat train_list.txt >> tmp +for i in {1..10}; do cat tmp >> train_list.txt; done +cd ../../ + # run bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_mode} ${device_num} ${max_epochs} ${num_workers} 2>&1; diff --git a/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_fp32_DP.sh b/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_fp32_DP.sh index 6ab1ec00cfc97b9b15e392bcc08ac5cde7a896e5..9f6ab183e28ab8d7243cbcedfa93215f590913f9 100644 --- a/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_fp32_DP.sh +++ b/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_fp32_DP.sh @@ -8,5 +8,11 @@ num_workers=8 # get data bash test_tipc/static/${model_item}/benchmark_common/prepare.sh + +cd ./dataset/ILSVRC2012 +cat train_list.txt >> tmp +for i in {1..10}; do cat tmp >> train_list.txt; done +cd ../../ + # run bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_mode} ${device_num} ${max_epochs} ${num_workers} 2>&1; diff --git a/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_pure_fp16_DP.sh b/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_pure_fp16_DP.sh index 672fb24660ba10f181098dc2c8b2cc52463bfc40..bef8186ea5e10feda6d62e1df01a41303b5c3469 100644 --- a/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_pure_fp16_DP.sh +++ b/test_tipc/static/ResNet50/N4C32/ResNet50_bs256_pure_fp16_DP.sh @@ -8,5 +8,11 @@ num_workers=8 # get data bash test_tipc/static/${model_item}/benchmark_common/prepare.sh + +cd ./dataset/ILSVRC2012 +cat train_list.txt >> tmp +for i in {1..10}; do cat tmp >> train_list.txt; done +cd ../../ + # run bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_mode} ${device_num} ${max_epochs} ${num_workers} 2>&1; diff --git a/test_tipc/static/ResNet50/benchmark_common/prepare.sh b/test_tipc/static/ResNet50/benchmark_common/prepare.sh index bdf34737f889c8baedc4e1d11bca00ea9fe8a00b..1dc3b7a47c239524c98f95a362d6257f503e9081 100644 --- a/test_tipc/static/ResNet50/benchmark_common/prepare.sh +++ b/test_tipc/static/ResNet50/benchmark_common/prepare.sh @@ -7,5 +7,7 @@ wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/ImageNet1k/ILSVR tar xf ILSVRC2012_val.tar ln -s ILSVRC2012_val ILSVRC2012 cd ILSVRC2012 -ln -s val_list.txt train_list.txt +for ((i=1; i<=4; i++));do + cat val_list.txt >> train_list.txt +done cd ../../ diff --git a/test_tipc/static/ResNet50/benchmark_common/run_benchmark.sh b/test_tipc/static/ResNet50/benchmark_common/run_benchmark.sh index c84ce46beb78226cf95d04f1b9f5f264882b318b..5146e6ccf65de82ec9e7b44cfb6a2f3f393bb12c 100644 --- a/test_tipc/static/ResNet50/benchmark_common/run_benchmark.sh +++ b/test_tipc/static/ResNet50/benchmark_common/run_benchmark.sh @@ -63,7 +63,7 @@ function _train(){ esac echo "train_cmd: ${train_cmd} log_file: ${log_file}" - timeout 5m ${train_cmd} > ${log_file} 2>&1 + timeout 10m ${train_cmd} > ${log_file} 2>&1 if [ $? 
-ne 0 ];then echo -e "${model_name}, FAIL" else diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh index 129e439562980a233924995141ea864d052f6dfb..24d406b8f06e6fa38385b2252d37d35a87ecffbb 100644 --- a/test_tipc/test_inference_cpp.sh +++ b/test_tipc/test_inference_cpp.sh @@ -2,23 +2,29 @@ source test_tipc/common_func.sh FILENAME=$1 -GPUID=$2 +MODE=$2 + +# set cuda device +GPUID=$3 if [[ ! $GPUID ]];then GPUID=0 fi -dataline=$(awk 'NR==1, NR==16{print}' $FILENAME) +env="export CUDA_VISIBLE_DEVICES=${GPUID}" +set CUDA_VISIBLE_DEVICES +eval $env + +dataline=$(awk 'NR==1, NR==19{print}' $FILENAME) # parser params IFS=$'\n' lines=(${dataline}) - -# parser cpp inference model +# parser cpp inference model model_name=$(func_parser_value "${lines[1]}") cpp_infer_type=$(func_parser_value "${lines[2]}") cpp_infer_model_dir=$(func_parser_value "${lines[3]}") cpp_det_infer_model_dir=$(func_parser_value "${lines[4]}") cpp_infer_is_quant=$(func_parser_value "${lines[7]}") -# parser cpp inference +# parser cpp inference inference_cmd=$(func_parser_value "${lines[8]}") cpp_use_gpu_list=$(func_parser_value "${lines[9]}") cpp_use_mkldnn_list=$(func_parser_value "${lines[10]}") @@ -31,7 +37,7 @@ cpp_benchmark_value=$(func_parser_value "${lines[16]}") generate_yaml_cmd=$(func_parser_value "${lines[17]}") transform_index_cmd=$(func_parser_value "${lines[18]}") -LOG_PATH="./test_tipc/output" +LOG_PATH="./test_tipc/output/${model_name}/${MODE}" mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results_cpp.log" # generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py" @@ -43,7 +49,7 @@ function func_shitu_cpp_inference(){ _log_path=$3 _img_dir=$4 _flag_quant=$5 - # inference + # inference for use_gpu in ${cpp_use_gpu_list[*]}; do if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then @@ -57,15 +63,14 @@ function func_shitu_cpp_inference(){ if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then precison="int8" fi - _save_log_path="${_log_path}/shitu_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" - - command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}" - eval $command - eval $transform_index_cmd - command="${_script} 2>&1|tee ${_save_log_path}" - eval $command + _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" + command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}" + eval $command + eval $transform_index_cmd + command="${_script} > ${_save_log_path} 2>&1" + eval $command last_status=${PIPESTATUS[0]} - status_check $last_status "${command}" "${status_log}" + status_check $last_status "${command}" "${status_log}" "${model_name}" done done done @@ -74,7 +79,7 @@ function func_shitu_cpp_inference(){ for precision in ${cpp_precision_list[*]}; do if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then continue - fi + fi if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = 
"False" ]; then continue fi @@ -82,14 +87,14 @@ function func_shitu_cpp_inference(){ continue fi for batch_size in ${cpp_batch_size_list[*]}; do - _save_log_path="${_log_path}/shitu_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" - command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}" + _save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}" + eval $command + eval $transform_index_cmd + command="${_script} > ${_save_log_path} 2>&1" eval $command - eval $transform_index_cmd - command="${_script} 2>&1|tee ${_save_log_path}" - eval $command last_status=${PIPESTATUS[0]} - status_check $last_status "${_script}" "${status_log}" + status_check $last_status "${command}" "${status_log}" "${model_name}" done done done @@ -106,7 +111,7 @@ function func_cls_cpp_inference(){ _log_path=$3 _img_dir=$4 _flag_quant=$5 - # inference + # inference for use_gpu in ${cpp_use_gpu_list[*]}; do if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then @@ -120,14 +125,14 @@ function func_cls_cpp_inference(){ if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then precison="int8" fi - _save_log_path="${_log_path}/cls_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" + _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" - command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}" - eval $command - command1="${_script} 2>&1|tee ${_save_log_path}" - eval ${command1} + command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}" + eval $command + command1="${_script} > ${_save_log_path} 2>&1" + eval ${command1} last_status=${PIPESTATUS[0]} - status_check $last_status "${command1}" "${status_log}" + status_check $last_status "${command1}" "${status_log}" "${model_name}" done done done @@ -136,7 +141,7 @@ function func_cls_cpp_inference(){ for precision in ${cpp_precision_list[*]}; do if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then continue - fi + fi if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then continue fi @@ -144,13 +149,13 @@ function func_cls_cpp_inference(){ continue fi for batch_size in ${cpp_batch_size_list[*]}; do - _save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" - command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} 
--cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}" + _save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}" + eval $command + command="${_script} > ${_save_log_path} 2>&1" eval $command - command="${_script} 2>&1|tee ${_save_log_path}" - eval $command last_status=${PIPESTATUS[0]} - status_check $last_status "${command}" "${status_log}" + status_check $last_status "${command}" "${status_log}" "${model_name}" done done done @@ -195,49 +200,11 @@ if [[ $cpp_infer_type == "shitu" ]]; then cd .. fi -if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then - echo "################### build opencv skipped ###################" -else - echo "################### build opencv ###################" - rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/ - wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz - tar -xf opencv-3.4.7.tar.gz - - cd opencv-3.4.7/ - install_path=$(pwd)/opencv3 - - rm -rf build - mkdir build - cd build - - cmake .. \ - -DCMAKE_INSTALL_PREFIX=${install_path} \ - -DCMAKE_BUILD_TYPE=Release \ - -DBUILD_SHARED_LIBS=OFF \ - -DWITH_IPP=OFF \ - -DBUILD_IPP_IW=OFF \ - -DWITH_LAPACK=OFF \ - -DWITH_EIGEN=OFF \ - -DCMAKE_INSTALL_LIBDIR=lib64 \ - -DWITH_ZLIB=ON \ - -DBUILD_ZLIB=ON \ - -DWITH_JPEG=ON \ - -DBUILD_JPEG=ON \ - -DWITH_PNG=ON \ - -DBUILD_PNG=ON \ - -DWITH_TIFF=ON \ - -DBUILD_TIFF=ON - - make -j - make install - cd ../../ - echo "################### build opencv finished ###################" -fi - echo "################### build PaddleClas demo ####################" -OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/ -# LIB_DIR=/work/project/project/test/paddle_inference/ -LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/ +# pwd = /workspace/hesensen/PaddleClas/deploy/cpp_shitu +OPENCV_DIR=$(dirname $PWD)/cpp/opencv-3.4.7/opencv3/ +LIB_DIR=$(dirname $PWD)/cpp/paddle_inference/ + CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`) CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`) @@ -275,18 +242,6 @@ cd ../../../ # cd ../../ echo "################### build PaddleClas demo finished ###################" - -# set cuda device -# GPUID=$2 -# if [ ${#GPUID} -le 0 ];then -# env="export CUDA_VISIBLE_DEVICES=0" -# else -# env="export CUDA_VISIBLE_DEVICES=${GPUID}" -# fi -# set CUDA_VISIBLE_DEVICES -# eval $env - - echo "################### run test ###################" export Count=0 IFS="|" @@ -295,9 +250,9 @@ for infer_model in ${cpp_infer_model_dir[*]}; do #run inference is_quant=${infer_quant_flag[Count]} if [[ $cpp_infer_type == "cls" ]]; then - func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant} + func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant} else - func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant} + func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant} fi Count=$(($Count + 1)) done diff --git a/test_tipc/test_inference_jeston.sh 
b/test_tipc/test_inference_jeston.sh index 2fd76e1e9e7e8c7b52d0b6838cd15840a59fe5c4..56845003908c1a9cc8ac1b76e40ec108d33e8478 100644 --- a/test_tipc/test_inference_jeston.sh +++ b/test_tipc/test_inference_jeston.sh @@ -71,7 +71,7 @@ if [ ${MODE} = "whole_infer" ]; then echo $export_cmd eval $export_cmd status_export=$? - status_check $status_export "${export_cmd}" "${status_log}" + status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" else save_infer_dir=${infer_model} fi diff --git a/test_tipc/test_lite_arm_cpu_cpp.sh b/test_tipc/test_lite_arm_cpu_cpp.sh index 86c340060296019d0aef798aacd95580a438e0ff..919226eea5ce38b82fad6c2130a7c6467b6ee041 100644 --- a/test_tipc/test_lite_arm_cpu_cpp.sh +++ b/test_tipc/test_lite_arm_cpu_cpp.sh @@ -67,7 +67,7 @@ function func_test_tipc(){ eval ${command1} command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1" eval ${command2} - status_check $? "${command2}" "${status_log}" + status_check $? "${command2}" "${status_log}" "${model_name}" done done done diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh index 850fc9049b95400ee6334ff9dfa677947294c2de..d025fb2efd672baab42e4617a13dd127d90a73bc 100644 --- a/test_tipc/test_paddle2onnx.sh +++ b/test_tipc/test_paddle2onnx.sh @@ -1,17 +1,11 @@ #!/bin/bash -source test_tipc/common_func.sh +source test_tipc/common_func.sh FILENAME=$1 - -dataline=$(cat ${FILENAME}) -lines=(${dataline}) -# common params -model_name=$(func_parser_value "${lines[1]}") -python=$(func_parser_value "${lines[2]}") - +MODE=$2 # parser params -dataline=$(awk 'NR==1, NR==14{print}' $FILENAME) +dataline=$(awk 'NR==1, NR==16{print}' $FILENAME) IFS=$'\n' lines=(${dataline}) @@ -31,17 +25,19 @@ opset_version_key=$(func_parser_key "${lines[8]}") opset_version_value=$(func_parser_value "${lines[8]}") enable_onnx_checker_key=$(func_parser_key "${lines[9]}") enable_onnx_checker_value=$(func_parser_value "${lines[9]}") -# parser onnx inference -inference_py=$(func_parser_value "${lines[10]}") -use_onnx_key=$(func_parser_key "${lines[11]}") -use_onnx_value=$(func_parser_value "${lines[11]}") -inference_model_dir_key=$(func_parser_key "${lines[12]}") -inference_model_dir_value=$(func_parser_value "${lines[12]}") -inference_hardware_key=$(func_parser_key "${lines[13]}") -inference_hardware_value=$(func_parser_value "${lines[13]}") +# parser onnx inference +inference_py=$(func_parser_value "${lines[11]}") +use_onnx_key=$(func_parser_key "${lines[12]}") +use_onnx_value=$(func_parser_value "${lines[12]}") +inference_model_dir_key=$(func_parser_key "${lines[13]}") +inference_model_dir_value=$(func_parser_value "${lines[13]}") +inference_hardware_key=$(func_parser_key "${lines[14]}") +inference_hardware_value=$(func_parser_value "${lines[14]}") +inference_config_key=$(func_parser_key "${lines[15]}") +inference_config_value=$(func_parser_value "${lines[15]}") -LOG_PATH="./test_tipc/output" -mkdir -p ./test_tipc/output +LOG_PATH="./test_tipc/output/${model_name}/${MODE}" +mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results_paddle2onnx.log" @@ -60,14 +56,18 @@ function func_paddle2onnx(){ trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}" eval $trans_model_cmd last_status=${PIPESTATUS[0]} - status_check $last_status "${trans_model_cmd}" "${status_log}" + status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" + # python 
inference - set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}") - set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}") - set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}") - infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} >${_save_log_path} 2>&1 && cd ../" - eval $infer_model_cmd - status_check $last_status "${infer_model_cmd}" "${status_log}" + if [[ ${inference_py} != "null" ]]; then + set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}") + set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}") + set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}") + set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}") + infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../" + eval $infer_model_cmd + status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" + fi } @@ -75,4 +75,4 @@ echo "################### run test ###################" export Count=0 IFS="|" -func_paddle2onnx \ No newline at end of file +func_paddle2onnx \ No newline at end of file diff --git a/test_tipc/test_ptq_inference_python.sh b/test_tipc/test_ptq_inference_python.sh new file mode 100644 index 0000000000000000000000000000000000000000..82c9816478f9ea993b2e53f8a685766e8dbf81d7 --- /dev/null +++ b/test_tipc/test_ptq_inference_python.sh @@ -0,0 +1,173 @@ +#!/bin/bash +FILENAME=$1 +source test_tipc/common_func.sh + +# MODE be one of ['whole_infer'] +MODE=$2 + +dataline=$(cat ${FILENAME}) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +# The training params +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") +gpu_list=$(func_parser_value "${lines[3]}") +train_use_gpu_key=$(func_parser_key "${lines[4]}") +train_use_gpu_value=$(func_parser_value "${lines[4]}") +autocast_list=$(func_parser_value "${lines[5]}") +autocast_key=$(func_parser_key "${lines[5]}") +epoch_key=$(func_parser_key "${lines[6]}") +epoch_num=$(func_parser_params "${lines[6]}") +save_model_key=$(func_parser_key "${lines[7]}") +train_batch_key=$(func_parser_key "${lines[8]}") +train_batch_value=$(func_parser_value "${lines[8]}") +pretrain_model_key=$(func_parser_key "${lines[9]}") +pretrain_model_value=$(func_parser_value "${lines[9]}") +train_model_name=$(func_parser_value "${lines[10]}") +train_infer_img_dir=$(func_parser_value "${lines[11]}") +train_param_key1=$(func_parser_key "${lines[12]}") +train_param_value1=$(func_parser_value "${lines[12]}") + +trainer_list=$(func_parser_value "${lines[14]}") +trainer_norm=$(func_parser_key "${lines[15]}") +norm_trainer=$(func_parser_value "${lines[15]}") +pact_key=$(func_parser_key "${lines[16]}") +pact_trainer=$(func_parser_value "${lines[16]}") +fpgm_key=$(func_parser_key "${lines[17]}") +fpgm_trainer=$(func_parser_value "${lines[17]}") +distill_key=$(func_parser_key "${lines[18]}") +distill_trainer=$(func_parser_value "${lines[18]}") +to_static_key=$(func_parser_key "${lines[19]}") +to_static_trainer=$(func_parser_value "${lines[19]}") +trainer_key2=$(func_parser_key "${lines[20]}") +trainer_value2=$(func_parser_value "${lines[20]}") + +eval_py=$(func_parser_value "${lines[23]}") +eval_key1=$(func_parser_key "${lines[24]}") +eval_value1=$(func_parser_value 
"${lines[24]}") + +save_infer_key=$(func_parser_key "${lines[27]}") +export_weight=$(func_parser_key "${lines[28]}") +norm_export=$(func_parser_value "${lines[29]}") +pact_export=$(func_parser_value "${lines[30]}") +fpgm_export=$(func_parser_value "${lines[31]}") +distill_export=$(func_parser_value "${lines[32]}") +kl_quant_cmd_key=$(func_parser_key "${lines[33]}") +kl_quant_cmd_value=$(func_parser_value "${lines[33]}") +export_key2=$(func_parser_key "${lines[34]}") +export_value2=$(func_parser_value "${lines[34]}") + +# parser inference model +infer_model_dir_list=$(func_parser_value "${lines[36]}") +infer_export_flag=$(func_parser_value "${lines[37]}") +infer_is_quant=$(func_parser_value "${lines[38]}") + +# parser inference +inference_py=$(func_parser_value "${lines[39]}") +use_gpu_key=$(func_parser_key "${lines[40]}") +use_gpu_list=$(func_parser_value "${lines[40]}") +use_mkldnn_key=$(func_parser_key "${lines[41]}") +use_mkldnn_list=$(func_parser_value "${lines[41]}") +cpu_threads_key=$(func_parser_key "${lines[42]}") +cpu_threads_list=$(func_parser_value "${lines[42]}") +batch_size_key=$(func_parser_key "${lines[43]}") +batch_size_list=$(func_parser_value "${lines[43]}") +use_trt_key=$(func_parser_key "${lines[44]}") +use_trt_list=$(func_parser_value "${lines[44]}") +precision_key=$(func_parser_key "${lines[45]}") +precision_list=$(func_parser_value "${lines[45]}") +infer_model_key=$(func_parser_key "${lines[46]}") +image_dir_key=$(func_parser_key "${lines[47]}") +infer_img_dir=$(func_parser_value "${lines[47]}") +save_log_key=$(func_parser_key "${lines[48]}") +benchmark_key=$(func_parser_key "${lines[49]}") +benchmark_value=$(func_parser_value "${lines[49]}") +infer_key1=$(func_parser_key "${lines[50]}") +infer_value1=$(func_parser_value "${lines[50]}") +if [ ! 
$epoch_num ]; then + epoch_num=2 +fi +if [[ $MODE = 'benchmark_train' ]]; then + epoch_num=1 +fi + +LOG_PATH="./test_tipc/output/${model_name}/${MODE}" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results_python.log" + +function func_inference() { + IFS='|' + _python=$1 + _script=$2 + _model_dir=$3 + _log_path=$4 + _img_dir=$5 + _flag_quant=$6 + # inference + for use_gpu in ${use_gpu_list[*]}; do + if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then + for use_mkldnn in ${use_mkldnn_list[*]}; do + for threads in ${cpu_threads_list[*]}; do + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "../${status_log}" "${model_name}" + done + done + done + elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then + for use_trt in ${use_trt_list[*]}; do + for precision in ${precision_list[*]}; do + if [ ${precision} = "True" ] && [ ${use_trt} = "False" ]; then + continue + fi + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") + set_precision=$(func_set_params "${precision_key}" "${precision}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "../${status_log}" "${model_name}" + done + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" + fi + done +} + +# for kl_quant +if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ]; then + echo "kl_quant" + command="${python} ${kl_quant_cmd_value}" + echo ${command} + eval $command + last_status=${PIPESTATUS[0]} + status_check $last_status "${command}" "${status_log}" "${model_name}" + cd ${infer_model_dir_list}/quant_post_static_model + ln -s __model__ inference.pdmodel + ln -s __params__ inference.pdiparams + cd ../../deploy + is_quant=True + func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant} + cd .. 
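# --- editor's sketch (not part of the patch): the kl_quant branch above bridges
# a naming mismatch. PaddleSlim's post-training quantization typically emits
# `__model__`/`__params__`, while the Python predictor expects
# `inference.pdmodel`/`inference.pdiparams`, so the two symlinks let
# func_inference load the quantized model unchanged. Assuming that output
# layout, the adaptation amounts to:
#   cd ${infer_model_dir_list}/quant_post_static_model
#   ln -s __model__ inference.pdmodel      # graph file under the expected name
#   ln -s __params__ inference.pdiparams   # weights under the expected name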
+fi diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh deleted file mode 100644 index c36935a60fecacea672fd932773a8fb0bdcd619b..0000000000000000000000000000000000000000 --- a/test_tipc/test_serving.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -source test_tipc/common_func.sh - -FILENAME=$1 -dataline=$(awk 'NR==1, NR==18{print}' $FILENAME) - -# parser params -IFS=$'\n' -lines=(${dataline}) - -# parser serving -model_name=$(func_parser_value "${lines[1]}") -python=$(func_parser_value "${lines[2]}") -trans_model_py=$(func_parser_value "${lines[3]}") -infer_model_dir_key=$(func_parser_key "${lines[4]}") -infer_model_dir_value=$(func_parser_value "${lines[4]}") -model_filename_key=$(func_parser_key "${lines[5]}") -model_filename_value=$(func_parser_value "${lines[5]}") -params_filename_key=$(func_parser_key "${lines[6]}") -params_filename_value=$(func_parser_value "${lines[6]}") -serving_server_key=$(func_parser_key "${lines[7]}") -serving_server_value=$(func_parser_value "${lines[7]}") -serving_client_key=$(func_parser_key "${lines[8]}") -serving_client_value=$(func_parser_value "${lines[8]}") -serving_dir_value=$(func_parser_value "${lines[9]}") -web_service_py=$(func_parser_value "${lines[10]}") -web_use_gpu_key=$(func_parser_key "${lines[11]}") -web_use_gpu_list=$(func_parser_value "${lines[11]}") -web_use_mkldnn_key=$(func_parser_key "${lines[12]}") -web_use_mkldnn_list=$(func_parser_value "${lines[12]}") -web_cpu_threads_key=$(func_parser_key "${lines[13]}") -web_cpu_threads_list=$(func_parser_value "${lines[13]}") -web_use_trt_key=$(func_parser_key "${lines[14]}") -web_use_trt_list=$(func_parser_value "${lines[14]}") -web_precision_key=$(func_parser_key "${lines[15]}") -web_precision_list=$(func_parser_value "${lines[15]}") -pipeline_py=$(func_parser_value "${lines[16]}") -image_dir_key=$(func_parser_key "${lines[17]}") -image_dir_value=$(func_parser_value "${lines[17]}") - -LOG_PATH="../../test_tipc/output" -mkdir -p ./test_tipc/output -status_log="${LOG_PATH}/results_serving.log" - -function func_serving(){ - IFS='|' - _python=$1 - _script=$2 - _model_dir=$3 - # pdserving - set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}") - set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") - set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") - set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}") - set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}") - set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}") - trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" - eval $trans_model_cmd - cd ${serving_dir_value} - echo $PWD - unset https_proxy - unset http_proxy - for python in ${python[*]}; do - if [ ${python} = "cpp"]; then - for use_gpu in ${web_use_gpu_list[*]}; do - if [ ${use_gpu} = "null" ]; then - web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293" - eval $web_service_cmd - sleep 2s - _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" - pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" - eval $pipeline_cmd - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - ps ux | grep -E 
'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - else - web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0" - eval $web_service_cmd - sleep 2s - _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log" - pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/" - eval $pipeline_cmd - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - fi - done - else - # python serving - for use_gpu in ${web_use_gpu_list[*]}; do - echo ${ues_gpu} - if [ ${use_gpu} = "null" ]; then - for use_mkldnn in ${web_use_mkldnn_list[*]}; do - if [ ${use_mkldnn} = "False" ]; then - continue - fi - for threads in ${web_cpu_threads_list[*]}; do - set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}") - web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &" - eval $web_service_cmd - sleep 2s - for pipeline in ${pipeline_py[*]}; do - _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log" - pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 " - eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - eval "cat ${_save_log_path}" - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - done - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - done - done - elif [ ${use_gpu} = "0" ]; then - for use_trt in ${web_use_trt_list[*]}; do - for precision in ${web_precision_list[*]}; do - if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then - continue - fi - if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then - continue - fi - if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then - continue - fi - set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}") - set_precision=$(func_set_params "${web_precision_key}" "${precision}") - web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & " - eval $web_service_cmd - - sleep 2s - for pipeline in ${pipeline_py[*]}; do - _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log" - pipeline_cmd="${python} ${pipeline} ${set_image_dir}> ${_save_log_path} 2>&1" - eval $pipeline_cmd - last_status=${PIPESTATUS[0]} - eval "cat ${_save_log_path}" - status_check $last_status "${pipeline_cmd}" "${status_log}" - sleep 2s - done - ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9 - done - done - else - echo "Does not support hardware other than CPU and GPU Currently!" 
- fi - done - fi - done -} - - -# set cuda device -GPUID=$2 -if [ ${#GPUID} -le 0 ];then - env=" " -else - env="export CUDA_VISIBLE_DEVICES=${GPUID}" -fi -set CUDA_VISIBLE_DEVICES -eval $env - - -echo "################### run test ###################" - -export Count=0 -IFS="|" -func_serving "${web_service_cmd}" diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh new file mode 100644 index 0000000000000000000000000000000000000000..fdb7ef186bafd9dcd879150188e1f0450ca87211 --- /dev/null +++ b/test_tipc/test_serving_infer_cpp.sh @@ -0,0 +1,270 @@ +#!/bin/bash +source test_tipc/common_func.sh + +FILENAME=$1 +dataline=$(awk 'NR==1, NR==19{print}' $FILENAME) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +function func_get_url_file_name(){ + strs=$1 + IFS="/" + array=(${strs}) + tmp=${array[${#array[@]}-1]} + echo ${tmp} +} + +# parser serving +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") +trans_model_py=$(func_parser_value "${lines[4]}") +infer_model_dir_key=$(func_parser_key "${lines[5]}") +infer_model_dir_value=$(func_parser_value "${lines[5]}") +model_filename_key=$(func_parser_key "${lines[6]}") +model_filename_value=$(func_parser_value "${lines[6]}") +params_filename_key=$(func_parser_key "${lines[7]}") +params_filename_value=$(func_parser_value "${lines[7]}") +serving_server_key=$(func_parser_key "${lines[8]}") +serving_server_value=$(func_parser_value "${lines[8]}") +serving_client_key=$(func_parser_key "${lines[9]}") +serving_client_value=$(func_parser_value "${lines[9]}") +serving_dir_value=$(func_parser_value "${lines[10]}") +web_service_py=$(func_parser_value "${lines[11]}") +web_use_gpu_key=$(func_parser_key "${lines[12]}") +web_use_gpu_list=$(func_parser_value "${lines[12]}") +pipeline_py=$(func_parser_value "${lines[13]}") + + +function func_serving_cls(){ + LOG_PATH="test_tipc/output/${model_name}" + mkdir -p ${LOG_PATH} + LOG_PATH="../../${LOG_PATH}" + status_log="${LOG_PATH}/results_serving.log" + IFS='|' + + # pdserving + set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}") + set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") + set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") + set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}") + set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}") + + for python_ in ${python[*]}; do + if [[ ${python_} =~ "python" ]]; then + trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" + eval ${trans_model_cmd} + break + fi + done + + # modify the alias_name of fetch_var to "outputs" + server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_server_value}/serving_server_conf.prototxt" + eval ${server_fetch_var_line_cmd} + + client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_client_value}/serving_client_conf.prototxt" + eval ${client_fetch_var_line_cmd} + + prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${serving_server_value}/serving_server_conf.prototxt) + IFS=$'\n' + prototxt_lines=(${prototxt_dataline}) + feed_var_name=$(func_parser_value "${prototxt_lines[2]}") + IFS='|' + + cd ${serving_dir_value} + unset https_proxy + unset http_proxy + + for item in ${python[*]}; 
do + if [[ ${item} =~ "python" ]]; then + python_=${item} + break + fi + done + serving_client_dir_name=$(func_get_url_file_name "$serving_client_value") + set_client_feed_type_cmd="sed -i '/feed_type/,/: .*/s/feed_type: .*/feed_type: 20/' ${serving_client_dir_name}/serving_client_conf.prototxt" + eval ${set_client_feed_type_cmd} + set_client_shape_cmd="sed -i '/shape: 3/,/shape: 3/s/shape: 3/shape: 1/' ${serving_client_dir_name}/serving_client_conf.prototxt" + eval ${set_client_shape_cmd} + set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_dir_name}/serving_client_conf.prototxt" + eval ${set_client_shape224_cmd} + set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_dir_name}/serving_client_conf.prototxt" + eval ${set_client_shape224_cmd} + + set_pipeline_load_config_cmd="sed -i '/load_client_config/,/.prototxt/s/.\/.*\/serving_client_conf.prototxt/.\/${serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}" + eval ${set_pipeline_load_config_cmd} + + set_pipeline_feed_var_cmd="sed -i '/feed=/,/: image}/s/feed={.*: image}/feed={${feed_var_name}: image}/' ${pipeline_py}" + eval ${set_pipeline_feed_var_cmd} + + serving_server_dir_name=$(func_get_url_file_name "$serving_server_value") + + for use_gpu in ${web_use_gpu_list[*]}; do + if [[ ${use_gpu} = "null" ]]; then + web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &" + eval ${web_service_cpp_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" + sleep 5s + _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log" + pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 " + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" + eval "${python_} -m paddle_serving_server.serve stop" + sleep 5s + else + web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &" + eval ${web_service_cpp_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" + sleep 8s + + _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log" + pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 " + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" + sleep 5s + eval "${python_} -m paddle_serving_server.serve stop" + fi + done +} + + +function func_serving_rec(){ + LOG_PATH="test_tipc/output/${model_name}" + mkdir -p ${LOG_PATH} + LOG_PATH="../../../${LOG_PATH}" + status_log="${LOG_PATH}/results_serving.log" + trans_model_py=$(func_parser_value "${lines[5]}") + cls_infer_model_dir_key=$(func_parser_key "${lines[6]}") + cls_infer_model_dir_value=$(func_parser_value "${lines[6]}") + det_infer_model_dir_key=$(func_parser_key "${lines[7]}") + det_infer_model_dir_value=$(func_parser_value "${lines[7]}") + model_filename_key=$(func_parser_key "${lines[8]}") + model_filename_value=$(func_parser_value "${lines[8]}") + params_filename_key=$(func_parser_key "${lines[9]}") + params_filename_value=$(func_parser_value "${lines[9]}") + + cls_serving_server_key=$(func_parser_key "${lines[10]}") + 
cls_serving_server_value=$(func_parser_value "${lines[10]}") + cls_serving_client_key=$(func_parser_key "${lines[11]}") + cls_serving_client_value=$(func_parser_value "${lines[11]}") + + det_serving_server_key=$(func_parser_key "${lines[12]}") + det_serving_server_value=$(func_parser_value "${lines[12]}") + det_serving_client_key=$(func_parser_key "${lines[13]}") + det_serving_client_value=$(func_parser_value "${lines[13]}") + + serving_dir_value=$(func_parser_value "${lines[14]}") + web_service_py=$(func_parser_value "${lines[15]}") + web_use_gpu_key=$(func_parser_key "${lines[16]}") + web_use_gpu_list=$(func_parser_value "${lines[16]}") + pipeline_py=$(func_parser_value "${lines[17]}") + + IFS='|' + for python_ in ${python[*]}; do + if [[ ${python_} =~ "python" ]]; then + python_interp=${python_} + break + fi + done + + # pdserving + cd ./deploy + set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}") + set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") + set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") + set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}") + set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}") + cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" + eval ${cls_trans_model_cmd} + + set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}") + set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") + set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") + set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}") + set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}") + det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" + eval ${det_trans_model_cmd} + + cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_serving/*.prototxt ${cls_serving_server_value}" + eval ${cp_prototxt_cmd} + cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/general_PPLCNet_x2_5_lite_v1.0_client/*.prototxt ${cls_serving_client_value}" + eval ${cp_prototxt_cmd} + cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/*.prototxt ${det_serving_client_value}" + eval ${cp_prototxt_cmd} + cp_prototxt_cmd="cp ./paddleserving/recognition/preprocess/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/*.prototxt ${det_serving_server_value}" + eval ${cp_prototxt_cmd} + + prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${cls_serving_server_value}/serving_server_conf.prototxt) + IFS=$'\n' + prototxt_lines=(${prototxt_dataline}) + feed_var_name=$(func_parser_value "${prototxt_lines[2]}") + IFS='|' + + cd ${serving_dir_value} + unset https_proxy + unset http_proxy + + # export SERVING_BIN=${PWD}/../Serving/server-build-gpu-opencv/core/general-server/serving + for use_gpu in ${web_use_gpu_list[*]}; do + if [ ${use_gpu} = "null" ]; then + det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value") + web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} 
../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &" + eval ${web_service_cpp_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" + sleep 5s + _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log" + pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 " + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" + eval "${python_} -m paddle_serving_server.serve stop" + sleep 5s + else + det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value") + web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &" + eval ${web_service_cpp_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" + sleep 5s + _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log" + pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 " + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" + eval "${python_} -m paddle_serving_server.serve stop" + sleep 5s + fi + done +} + + +# set cuda device +GPUID=$3 +if [ ${#GPUID} -le 0 ];then + env="export CUDA_VISIBLE_DEVICES=0" +else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" +fi +set CUDA_VISIBLE_DEVICES +eval ${env} + + +echo "################### run test ###################" + +export Count=0 +IFS="|" +if [[ ${model_name} =~ "ShiTu" ]]; then + func_serving_rec +else + func_serving_cls +fi diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh new file mode 100644 index 0000000000000000000000000000000000000000..050c3c89c9a454eb7f973f405ded37a3f1df042a --- /dev/null +++ b/test_tipc/test_serving_infer_python.sh @@ -0,0 +1,318 @@ +#!/bin/bash +source test_tipc/common_func.sh + +FILENAME=$1 +MODE=$2 +dataline=$(awk 'NR==1, NR==19{print}' $FILENAME) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +function func_get_url_file_name(){ + strs=$1 + IFS="/" + array=(${strs}) + tmp=${array[${#array[@]}-1]} + echo ${tmp} +} + +# parser serving +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") +trans_model_py=$(func_parser_value "${lines[4]}") +infer_model_dir_key=$(func_parser_key "${lines[5]}") +infer_model_dir_value=$(func_parser_value "${lines[5]}") +model_filename_key=$(func_parser_key "${lines[6]}") +model_filename_value=$(func_parser_value "${lines[6]}") +params_filename_key=$(func_parser_key "${lines[7]}") +params_filename_value=$(func_parser_value "${lines[7]}") +serving_server_key=$(func_parser_key "${lines[8]}") +serving_server_value=$(func_parser_value "${lines[8]}") +serving_client_key=$(func_parser_key "${lines[9]}") +serving_client_value=$(func_parser_value "${lines[9]}") +serving_dir_value=$(func_parser_value "${lines[10]}") +web_service_py=$(func_parser_value "${lines[11]}") +web_use_gpu_key=$(func_parser_key "${lines[12]}") +web_use_gpu_list=$(func_parser_value "${lines[12]}") +pipeline_py=$(func_parser_value "${lines[13]}") + + +function func_serving_cls(){ + LOG_PATH="test_tipc/output/${model_name}/${MODE}" + mkdir -p ${LOG_PATH} + LOG_PATH="../../${LOG_PATH}" 
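# --- editor's sketch (not part of the patch): LOG_PATH is first created
# relative to the repo root, then rewritten with a `../../` prefix because this
# function later cd's into ${serving_dir_value} (two directory levels below the
# root, e.g. deploy/paddleserving in this repo) before any log is written.
# Assuming the script starts at the repo root, the effect is:
#   mkdir -p test_tipc/output/${model_name}/${MODE}   # created from the root
#   cd deploy/paddleserving                           # serving working dir
#   echo ok > ../../test_tipc/output/${model_name}/${MODE}/results_serving.log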
+ status_log="${LOG_PATH}/results_serving.log" + IFS='|' + + # pdserving + set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}") + set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") + set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") + set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}") + set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}") + + for python_ in ${python[*]}; do + if [[ ${python_} =~ "python" ]]; then + trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" + eval ${trans_model_cmd} + break + fi + done + + # modify the alias_name of fetch_var to "outputs" + server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_server_value}/serving_server_conf.prototxt" + eval ${server_fetch_var_line_cmd} + + client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_client_value}/serving_client_conf.prototxt" + eval ${client_fetch_var_line_cmd} + + prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${serving_server_value}/serving_server_conf.prototxt) + IFS=$'\n' + prototxt_lines=(${prototxt_dataline}) + feed_var_name=$(func_parser_value "${prototxt_lines[2]}") + IFS='|' + + cd ${serving_dir_value} + unset https_proxy + unset http_proxy + + # python serving + # modify the input_name in "classification_web_service.py" to be consistent with feed_var.name in prototxt + set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}" + eval ${set_web_service_feed_var_cmd} + + model_config=21 + serving_server_dir_name=$(func_get_url_file_name "$serving_server_value") + set_model_config_cmd="sed -i '${model_config}s/model_config: .*/model_config: ${serving_server_dir_name}/' config.yml" + eval ${set_model_config_cmd} + + for use_gpu in ${web_use_gpu_list[*]}; do + if [[ ${use_gpu} = "null" ]]; then + device_type_line=24 + set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml" + eval ${set_device_type_cmd} + + devices_line=27 + set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml" + eval ${set_devices_cmd} + + web_service_cmd="${python_} ${web_service_py} &" + eval ${web_service_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + sleep 5s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log" + pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1 " + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" + sleep 5s + done + eval "${python_} -m paddle_serving_server.serve stop" + elif [ ${use_gpu} -eq 0 ]; then + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then + continue + fi + + device_type_line=24 + set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' 
config.yml" + eval ${set_device_type_cmd} + + devices_line=27 + set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml" + eval ${set_devices_cmd} + + web_service_cmd="${python_} ${web_service_py} & " + eval ${web_service_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + sleep 5s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log" + pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1" + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" + sleep 5s + done + eval "${python_} -m paddle_serving_server.serve stop" + else + echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!" + fi + done +} + + +function func_serving_rec(){ + LOG_PATH="test_tipc/output/${model_name}/${MODE}" + mkdir -p ${LOG_PATH} + LOG_PATH="../../../${LOG_PATH}" + status_log="${LOG_PATH}/results_serving.log" + trans_model_py=$(func_parser_value "${lines[5]}") + cls_infer_model_dir_key=$(func_parser_key "${lines[6]}") + cls_infer_model_dir_value=$(func_parser_value "${lines[6]}") + det_infer_model_dir_key=$(func_parser_key "${lines[7]}") + det_infer_model_dir_value=$(func_parser_value "${lines[7]}") + model_filename_key=$(func_parser_key "${lines[8]}") + model_filename_value=$(func_parser_value "${lines[8]}") + params_filename_key=$(func_parser_key "${lines[9]}") + params_filename_value=$(func_parser_value "${lines[9]}") + + cls_serving_server_key=$(func_parser_key "${lines[10]}") + cls_serving_server_value=$(func_parser_value "${lines[10]}") + cls_serving_client_key=$(func_parser_key "${lines[11]}") + cls_serving_client_value=$(func_parser_value "${lines[11]}") + + det_serving_server_key=$(func_parser_key "${lines[12]}") + det_serving_server_value=$(func_parser_value "${lines[12]}") + det_serving_client_key=$(func_parser_key "${lines[13]}") + det_serving_client_value=$(func_parser_value "${lines[13]}") + + serving_dir_value=$(func_parser_value "${lines[14]}") + web_service_py=$(func_parser_value "${lines[15]}") + web_use_gpu_key=$(func_parser_key "${lines[16]}") + web_use_gpu_list=$(func_parser_value "${lines[16]}") + pipeline_py=$(func_parser_value "${lines[17]}") + + IFS='|' + for python_ in ${python[*]}; do + if [[ ${python_} =~ "python" ]]; then + python_interp=${python_} + break + fi + done + + # pdserving + cd ./deploy + set_dirname=$(func_set_params "${cls_infer_model_dir_key}" "${cls_infer_model_dir_value}") + set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") + set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") + set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}") + set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}") + cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" + eval ${cls_trans_model_cmd} + + set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}") + set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}") + set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}") + set_serving_server=$(func_set_params 
"${det_serving_server_key}" "${det_serving_server_value}") + set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}") + det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" + eval ${det_trans_model_cmd} + + # modify the alias_name of fetch_var to "outputs" + server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt" + eval ${server_fetch_var_line_cmd} + client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_client_value/serving_client_conf.prototxt" + eval ${client_fetch_var_line_cmd} + + prototxt_dataline=$(awk 'NR==1, NR==3{print}' ${cls_serving_server_value}/serving_server_conf.prototxt) + IFS=$'\n' + prototxt_lines=(${prototxt_dataline}) + feed_var_name=$(func_parser_value "${prototxt_lines[2]}") + IFS='|' + + cd ${serving_dir_value} + unset https_proxy + unset http_proxy + + # modify the input_name in "recognition_web_service.py" to be consistent with feed_var.name in prototxt + set_web_service_feed_var_cmd="sed -i '/preprocess/,/input_imgs}/s/{.*: input_imgs}/{${feed_var_name}: input_imgs}/' ${web_service_py}" + eval ${set_web_service_feed_var_cmd} + # python serving + for use_gpu in ${web_use_gpu_list[*]}; do + if [[ ${use_gpu} = "null" ]]; then + device_type_line=24 + set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 0/' config.yml" + eval ${set_device_type_cmd} + + devices_line=27 + set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml" + eval ${set_devices_cmd} + + web_service_cmd="${python} ${web_service_py} &" + eval ${web_service_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + sleep 5s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log" + pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 " + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" + sleep 5s + done + eval "${python_} -m paddle_serving_server.serve stop" + elif [ ${use_gpu} -eq 0 ]; then + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then + continue + fi + + device_type_line=24 + set_device_type_cmd="sed -i '${device_type_line}s/device_type: .*/device_type: 1/' config.yml" + eval ${set_device_type_cmd} + + devices_line=27 + set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml" + eval ${set_devices_cmd} + + web_service_cmd="${python} ${web_service_py} & " + eval ${web_service_cmd} + last_status=${PIPESTATUS[0]} + status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" + sleep 10s + for pipeline in ${pipeline_py[*]}; do + _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log" + pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1" + eval ${pipeline_cmd} + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${pipeline_cmd}" 
"${status_log}" "${model_name}" + sleep 10s + done + eval "${python_} -m paddle_serving_server.serve stop" + else + echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!" + fi + done +} + + +# set cuda device +GPUID=$3 +if [ ${#GPUID} -le 0 ];then + env="export CUDA_VISIBLE_DEVICES=0" +else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" +fi +set CUDA_VISIBLE_DEVICES +eval ${env} + + +echo "################### run test ###################" + +export Count=0 +IFS="|" +if [[ ${model_name} = "PPShiTu" ]]; then + func_serving_rec +else + func_serving_cls +fi diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh index a567ef3c6ae1e5a7429d4a5738cdb8ce5c6189fa..ad5b301f1ef5bacdd82cafff35d3d61699b38151 100644 --- a/test_tipc/test_train_inference_python.sh +++ b/test_tipc/test_train_inference_python.sh @@ -32,6 +32,7 @@ train_param_key1=$(func_parser_key "${lines[12]}") train_param_value1=$(func_parser_value "${lines[12]}") trainer_list=$(func_parser_value "${lines[14]}") + trainer_norm=$(func_parser_key "${lines[15]}") norm_trainer=$(func_parser_value "${lines[15]}") pact_key=$(func_parser_key "${lines[16]}") @@ -60,12 +61,12 @@ kl_quant_cmd_value=$(func_parser_value "${lines[33]}") export_key2=$(func_parser_key "${lines[34]}") export_value2=$(func_parser_value "${lines[34]}") -# parser inference model +# parser inference model infer_model_dir_list=$(func_parser_value "${lines[36]}") infer_export_flag=$(func_parser_value "${lines[37]}") infer_is_quant=$(func_parser_value "${lines[38]}") -# parser inference +# parser inference inference_py=$(func_parser_value "${lines[39]}") use_gpu_key=$(func_parser_key "${lines[40]}") use_gpu_list=$(func_parser_value "${lines[40]}") @@ -88,17 +89,17 @@ benchmark_value=$(func_parser_value "${lines[49]}") infer_key1=$(func_parser_key "${lines[50]}") infer_value1=$(func_parser_value "${lines[50]}") if [ ! 
 if [ ! $epoch_num ]; then
-    epoch_num=2 
+    epoch_num=2
 fi
-if [ $MODE = 'benchmark_train' ]; then
-    epoch_num=1 
+if [[ $MODE = 'benchmark_train' ]]; then
+    epoch_num=1
 fi
-LOG_PATH="./test_tipc/output/${model_name}"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
 
-function func_inference(){
+function func_inference() {
     IFS='|'
     _python=$1
     _script=$2
@@ -106,13 +107,10 @@ function func_inference(){
     _log_path=$4
     _img_dir=$5
     _flag_quant=$6
-    # inference 
+    # inference
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
             for use_mkldnn in ${use_mkldnn_list[*]}; do
-                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
-                    continue
-                fi
                 for threads in ${cpu_threads_list[*]}; do
                     for batch_size in ${batch_size_list[*]}; do
                         _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
@@ -126,7 +124,7 @@ function func_inference(){
                         eval $command
                         last_status=${PIPESTATUS[0]}
                         eval "cat ${_save_log_path}"
-                        status_check $last_status "${command}" "../${status_log}"
+                        status_check $last_status "${command}" "../${status_log}" "${model_name}"
                     done
                 done
             done
@@ -136,9 +134,6 @@ function func_inference(){
                 if [ ${precision} = "True" ] && [ ${use_trt} = "False" ]; then
                     continue
                 fi
-                if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
-                    continue
-                fi
                 for batch_size in ${batch_size_list[*]}; do
                     _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                     set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
@@ -151,7 +146,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}"
+                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
                 done
             done
         done
@@ -161,51 +156,23 @@ function func_inference(){
     done
 }
 
-if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
-    IFS="|"
-    infer_export_flag=(${infer_export_flag})
-    if [ ${infer_export_flag} != "null" ] && [ ${infer_export_flag} != "False" ]; then
-        rm -rf ${infer_model_dir_list/..\//}
-        export_cmd="${python} ${norm_export} -o Global.pretrained_model=${model_name}_pretrained -o Global.save_inference_dir=${infer_model_dir_list/..\//}"
-        eval $export_cmd
-    fi
-fi
-if [ ${MODE} = "whole_infer" ]; then
-    GPUID=$3
-    if [ ${#GPUID} -le 0 ];then
-        env=" "
-    else
-        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
-    fi
-    # set CUDA_VISIBLE_DEVICES
-    eval $env
-    export Count=0
-    cd deploy
-    for infer_model in ${infer_model_dir_list[*]}; do
-        #run inference
-        is_quant=${infer_quant_flag[Count]}
-        echo "is_quant: ${is_quant}"
-        func_inference "${python}" "${inference_py}" "${infer_model}" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
-        Count=$(($Count + 1))
-    done
-    cd ..
-
-elif [ ${MODE} = "klquant_whole_infer" ]; then
+if [[ ${MODE} = "whole_infer" ]]; then
     # for kl_quant
     if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ]; then
-        echo "kl_quant"
-        command="${python} ${kl_quant_cmd_value}"
-        eval $command
-        last_status=${PIPESTATUS[0]}
-        status_check $last_status "${command}" "${status_log}"
-        cd inference/quant_post_static_model
-        ln -s __model__ inference.pdmodel
-        ln -s __params__ inference.pdiparams
-        cd ../../deploy
-        is_quant=True
-        func_inference "${python}" "${inference_py}" "${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
-        cd ..
+        echo "kl_quant"
+        command="${python} ${kl_quant_cmd_value}"
+        echo ${command}
+        eval $command
+        last_status=${PIPESTATUS[0]}
+        status_check $last_status "${command}" "${status_log}" "${model_name}"
+        cd ${infer_model_dir_list}/quant_post_static_model
+        ln -s __model__ inference.pdmodel
+        ln -s __params__ inference.pdiparams
+        cd ../../deploy
+        is_quant=True
+        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        cd ..
     fi
 else
     IFS="|"
@@ -215,12 +182,12 @@ else
         train_use_gpu=${USE_GPU_KEY[Count]}
         Count=$(($Count + 1))
         ips=""
-        if [ ${gpu} = "-1" ];then
+        if [ ${gpu} = "-1" ]; then
             env=""
-        elif [ ${#gpu} -le 1 ];then
+        elif [ ${#gpu} -le 1 ]; then
             env="export CUDA_VISIBLE_DEVICES=${gpu}"
             eval ${env}
-        elif [ ${#gpu} -le 15 ];then
+        elif [ ${#gpu} -le 15 ]; then
             IFS=","
             array=(${gpu})
             env="export CUDA_VISIBLE_DEVICES=${array[0]}"
@@ -234,7 +201,7 @@ else
             env=" "
         fi
         for autocast in ${autocast_list[*]}; do
-            for trainer in ${trainer_list[*]}; do 
+            for trainer in ${trainer_list[*]}; do
                 flag_quant=False
                 if [ ${trainer} = ${pact_key} ]; then
                     run_train=${pact_trainer}
@@ -263,14 +230,16 @@ else
                 if [ ${run_train} = "null" ]; then
                     continue
                 fi
-                
+
                 set_autocast=$(func_set_params "${autocast_key}" "${autocast}")
                 set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}")
                 set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
                 set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
                 set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
                 set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu_value}")
-                if [ ${#ips} -le 26 ];then
+                if [ ${#ips} -le 15 ]; then
+                    # an ips string longer than 15 chars is treated as multi-machine;
+                    # 15 is the minimum length of multi-machine ips info: 0.0.0.0,0.0.0.0
                     save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
                     nodes=1
                 else
@@ -280,68 +249,68 @@ else
                     nodes=${#ips_array[@]}
                     save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
                 fi
-                
+
                 # load pretrain from norm training if current trainer is pact or fpgm trainer
                 # if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then
                 #     set_pretrain="${load_norm_train_model}"
                 # fi
                 set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
-                if [ ${#gpu} -le 2 ];then  # train with cpu or single gpu
+                if [ ${#gpu} -le 2 ]; then  # train with cpu or single gpu
                     cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
-                elif [ ${#ips} -le 26 ];then  # train with multi-gpu
+                elif [ ${#ips} -le 15 ]; then  # train with multi-gpu
                     cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
-                else     # train with multi-machine
+                else  # train with multi-machine
                     cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}"
                 fi
                 # run train
-                eval "unset CUDA_VISIBLE_DEVICES"
-                # export FLAGS_cudnn_deterministic=True
-                sleep 5
+                eval "unset CUDA_VISIBLE_DEVICES"
+                # export FLAGS_cudnn_deterministic=True
+                sleep 5
                 eval $cmd
-                status_check $? "${cmd}" "${status_log}"
+                status_check $? "${cmd}" "${status_log}" "${model_name}"
                 sleep 5
-
-                if [[ $FILENAME == *GeneralRecognition* ]]; then
-                    set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}")
-                else
+
+                if [[ $FILENAME == *GeneralRecognition* ]]; then
+                    set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}")
+                else
                     set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${model_name}/${train_model_name}")
-                fi
-                # save norm trained models to set pretrain for pact training and fpgm training
-                if [ ${trainer} = ${trainer_norm} ]; then
+                fi
+                # save norm trained models to set pretrain for pact training and fpgm training
+                if [[ ${trainer} = ${trainer_norm} || ${trainer} = ${pact_key} ]]; then
                     load_norm_train_model=${set_eval_pretrain}
                 fi
-                # run eval 
+                # run eval
                 if [ ${eval_py} != "null" ]; then
                     set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
-                    eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}" 
+                    eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
                    eval $eval_cmd
-                    status_check $? "${eval_cmd}" "${status_log}"
+                    status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
                     sleep 5
                 fi
                 # run export model
-                if [ ${run_export} != "null" ]; then 
+                if [ ${run_export} != "null" ]; then
                     # run export model
                     save_infer_path="${save_log}"
-                    if [[ $FILENAME == *GeneralRecognition* ]]; then
-                        set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}")
-                    else
-                        set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
-                    fi
+                    if [[ $FILENAME == *GeneralRecognition* ]]; then
+                        set_export_weight=$(func_set_params "${export_weight}" "${save_log}/RecModel/${train_model_name}")
+                    else
+                        set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
+                    fi
                     set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
                     export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
                     eval $export_cmd
-                    status_check $? "${export_cmd}" "${status_log}"
+                    status_check $? "${export_cmd}" "${status_log}" "${model_name}"
                     #run inference
                     eval $env
                     save_infer_path="${save_log}"
-                    cd deploy
+                    cd deploy
                     func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}"
-                    cd ..
+                    cd ..
                 fi
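+                # clear GPU visibility after each run so the next trainer/autocast/gpu
+                # combination re-applies its own device settings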
                 eval "unset CUDA_VISIBLE_DEVICES"
-        done  # done with:    for trainer in ${trainer_list[*]}; do 
-    done      # done with:    for autocast in ${autocast_list[*]}; do 
-done          # done with:    for gpu in ${gpu_list[*]}; do 
-fi  # end if [ ${MODE} = "infer" ]; then
+            done  # done with:    for trainer in ${trainer_list[*]}; do
+        done  # done with:    for autocast in ${autocast_list[*]}; do
+    done  # done with:    for gpu in ${gpu_list[*]}; do
+fi  # end if [[ ${MODE} = "whole_infer" ]]; then
diff --git a/tools/export_model.py b/tools/export_model.py
index 01aba06c1f715f764352c6fd38a23c470e66e289..35f432f50e9d1dd903be3d0d3e07a4e42f2a2b7f 100644
--- a/tools/export_model.py
+++ b/tools/export_model.py
@@ -30,5 +30,9 @@ if __name__ == "__main__":
     args = config.parse_args()
     config = config.get_config(
         args.config, overrides=args.override, show=False)
+    # SyncBatchNorm only makes sense during distributed training; exported
+    # inference graphs should fall back to plain BatchNorm, so force the flag off
+    if config["Arch"].get("use_sync_bn", False):
+        config["Arch"]["use_sync_bn"] = False
     engine = Engine(config, mode="export")
     engine.export()
diff --git a/tools/run.sh b/tools/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..accf628f4bdc87142279e774abfa25634f1e243a
--- /dev/null
+++ b/tools/run.sh
@@ -0,0 +1,302 @@
+#!/usr/bin/env bash
+GPU_IDS="0,1,2,3"
+
+# Basic Config
+CONFIG="ppcls/configs/cls_demo/person/PPLCNet/PPLCNet_x1_0.yaml"
+EPOCHS=1
+OUTPUT="output_debug4"
+STATUS_LOG="${OUTPUT}/status_result.log"
+RESULT="${OUTPUT}/result.log"
+
+
+# Search Options
+LR_LIST=( 0.0075 0.01 0.0125 )
+RESOLUTION_LIST=( 176 192 224 )
+RA_PROB_LIST=( 0.0 0.1 0.5 )
+RE_PROB_LIST=( 0.0 0.1 0.5 )
+LR_MULT_LIST=( [0.0,0.2,0.4,0.6,0.8,1.0] [0.0,0.4,0.4,0.8,0.8,1.0] )
+TEACHER_LIST=( "ResNet101_vd" "ResNet50_vd" )
+
+
+# Train Mode
+declare -A MODE_MAP
+MODE_MAP=(["search_lr"]=1 ["search_resolution"]=1 ["search_ra_prob"]=1 ["search_re_prob"]=1 ["search_lr_mult_list"]=1 ["search_teacher"]=1 ["train_distillation_model"]=1)
+
+export CUDA_VISIBLE_DEVICES=${GPU_IDS}
+
+
+function status_check(){
+    last_status=$1   # the exit code
+    run_command=$2
+    run_log=$3
+    if [ $last_status -eq 0 ]; then
+        echo -e "\033[33m Run successfully with command - ${run_command}!  \033[0m" | tee -a ${run_log}
+    else
+        echo -e "\033[33m Run failed with command - ${run_command}!  \033[0m" | tee -a ${run_log}
+    fi
+}
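+# Usage (illustrative): status_check $? "${some_command}" "${STATUS_LOG}"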
\033[0m" | tee -a ${run_log} + fi +} + + +function get_max_value(){ + array=($*) + max=${array[0]} + index=0 + for (( i=0; i<${#array[*]-1}; i++ )); do + if [[ $(echo "${array[$i]} > $max"|bc) -eq 1 ]]; then + max=${array[$i]} + index=${i} + else + continue + fi + done + echo ${max} + echo ${index} +} + +function get_best_info(){ + _parameter=$1 + params_index=2 + if [[ ${_parameter} == "TEACHER" ]]; then + params_index=3 + fi + parameters_list=$(find ${OUTPUT}/${_parameter}* -name train.log | awk -v params_index=${params_index} -F "/" '{print $params_index}') + metric_list=$(find ${OUTPUT}/${_parameter}* -name train.log | xargs cat | grep "best" | grep "Epoch ${EPOCHS}" | awk -F " " '{print substr($NF,0,7)}') + best_info=$(get_max_value ${metric_list[*]}) + best_metric=$(echo $best_info | awk -F " " '{print $1}') + best_index=$(echo $best_info | awk -F " " '{print $2}') + best_parameter=$(echo $parameters_list | awk -v best=$(($best_index+1)) '{print $best}' | awk -F "_" '{print $2}') + echo ${best_metric} + echo ${best_parameter} +} + + +function search_lr(){ + for lr in ${LR_LIST[*]}; do + cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \ + -c ${CONFIG} \ + -o Global.output_dir=${OUTPUT}/LR_${lr} \ + -o Optimizer.lr.learning_rate=${lr} \ + -o Global.epochs=${EPOCHS}" + eval ${cmd_train} + status_check $? "${cmd_train}" "${STATUS_LOG}" + cmd="find ${OUTPUT} -name epoch* | xargs rm -rf" + eval ${cmd} + done +} + + +function search_resolution(){ + _lr=$1 + for resolution in ${RESOLUTION_LIST[*]}; do + cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \ + -c ${CONFIG} \ + -o Global.output_dir=${OUTPUT}/RESOLUTION_${resolution} \ + -o Optimizer.lr.learning_rate=${_lr} \ + -o Global.epochs=${EPOCHS} \ + -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${resolution}" + eval ${cmd_train} + status_check $? "${cmd_train}" "${STATUS_LOG}" + cmd="find ${OUTPUT} -name epoch* | xargs rm -rf" + eval ${cmd} + done +} + + + +function search_ra_prob(){ + _lr=$1 + _resolution=$2 + for ra_prob in ${RA_PROB_LIST[*]}; do + cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \ + -c ${CONFIG} \ + -o Global.output_dir=${OUTPUT}/RA_${ra_prob} \ + -o Optimizer.lr.learning_rate=${_lr} \ + -o Global.epochs=${EPOCHS} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${ra_prob} \ + -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution}" + eval ${cmd_train} + status_check $? "${cmd_train}" "${STATUS_LOG}" + cmd="find ${OUTPUT} -name epoch* | xargs rm -rf" + eval ${cmd} + done +} + + + +function search_re_prob(){ + _lr=$1 + _resolution=$2 + _ra_prob=$3 + for re_prob in ${RE_PROB_LIST[*]}; do + cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \ + -c ${CONFIG} \ + -o Global.output_dir=${OUTPUT}/RE_${re_prob} \ + -o Optimizer.lr.learning_rate=${_lr} \ + -o Global.epochs=${EPOCHS} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \ + -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${re_prob} \ + -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution}" + eval ${cmd_train} + status_check $? 
"${cmd_train}" "${STATUS_LOG}" + cmd="find ${OUTPUT} -name epoch* | xargs rm -rf" + eval ${cmd} + done +} + + +function search_lr_mult_list(){ + _lr=$1 + _resolution=$2 + _ra_prob=$3 + _re_prob=$4 + + for lr_mult in ${LR_MULT_LIST[*]}; do + cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \ + -c ${CONFIG} \ + -o Global.output_dir=${OUTPUT}/LR_MULT_${lr_mult} \ + -o Optimizer.lr.learning_rate=${_lr} \ + -o Global.epochs=${EPOCHS} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \ + -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \ + -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \ + -o Arch.lr_mult_list=${lr_mult}" + eval ${cmd_train} + status_check $? "${cmd_train}" "${STATUS_LOG}" + cmd="find ${OUTPUT} -name epoch* | xargs rm -rf" + eval ${cmd} + done + +} + + +function search_teacher(){ + _lr=$1 + _resolution=$2 + _ra_prob=$3 + _re_prob=$4 + + for teacher in ${TEACHER_LIST[*]}; do + cmd_train="python3.7 -m paddle.distributed.launch --gpus=${GPU_IDS} tools/train.py \ + -c ${CONFIG} \ + -o Global.output_dir=${OUTPUT}/TEACHER_${teacher} \ + -o Optimizer.lr.learning_rate=${_lr} \ + -o Global.epochs=${EPOCHS} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \ + -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \ + -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \ + -o Arch.name=${teacher}" + eval ${cmd_train} + status_check $? "${cmd_train}" "${STATUS_LOG}" + cmd="find ${OUTPUT}/* -name epoch* | xargs rm -rf" + eval ${cmd} + done +} + + +# train the model for knowledge distillation +function train_distillation_model(){ + _lr=$1 + _resolution=$2 + _ra_prob=$3 + _re_prob=$4 + _lr_mult=$5 + teacher=$6 + t_pretrained_model="${OUTPUT}/TEACHER_${teacher}/${teacher}/best_model" + config="ppcls/configs/cls_demo/person/Distillation/PPLCNet_x1_0_distillation.yaml" + combined_label_list="./dataset/person/train_list_for_distill.txt" + + cmd_train="python3.7 -m paddle.distributed.launch \ + --gpus=${GPU_IDS} \ + tools/train.py -c ${config} \ + -o Global.output_dir=${OUTPUT}/kd_teacher \ + -o Optimizer.lr.learning_rate=${_lr} \ + -o Global.epochs=${EPOCHS} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.prob=${_ra_prob} \ + -o DataLoader.Train.dataset.transform_ops.5.RandomErasing.EPSILON=${_re_prob} \ + -o DataLoader.Train.dataset.transform_ops.1.RandCropImage.size=${_resolution} \ + -o DataLoader.Train.dataset.transform_ops.3.TimmAutoAugment.img_size=${_resolution} \ + -o DataLoader.Train.dataset.cls_label_path=${combined_label_list} \ + -o Arch.models.0.Teacher.name="${teacher}" \ + -o Arch.models.0.Teacher.pretrained="${t_pretrained_model}" \ + -o Arch.models.1.Student.lr_mult_list=${_lr_mult}" + eval ${cmd_train} + status_check $? 
"${cmd_train}" "${STATUS_LOG}" + cmd="find ${OUTPUT} -name epoch* | xargs rm -rf" + eval ${cmd} +} + +######## Train PaddleClas ######## +rm -rf ${OUTPUT} + +# Train and get best lr +best_lr=0.01 +if [[ ${MODE_MAP["search_lr"]} -eq 1 ]]; then + search_lr + best_info=$(get_best_info "LR_[0-9]") + best_metric=$(echo $best_info | awk -F " " '{print $1}') + best_lr=$(echo $best_info | awk -F " " '{print $2}') + echo "The best lr is ${best_lr}, and the best metric is ${best_metric}" >> ${RESULT} +fi + +# Train and get best resolution +best_resolution=192 +if [[ ${MODE_MAP["search_resolution"]} -eq 1 ]]; then + search_resolution "${best_lr}" + best_info=$(get_best_info "RESOLUTION") + best_metric=$(echo $best_info | awk -F " " '{print $1}') + best_resolution=$(echo $best_info | awk -F " " '{print $2}') + echo "The best resolution is ${best_resolution}, and the best metric is ${best_metric}" >> ${RESULT} +fi + +# Train and get best ra_prob +best_ra_prob=0.0 +if [[ ${MODE_MAP["search_ra_prob"]} -eq 1 ]]; then + search_ra_prob "${best_lr}" "${best_resolution}" + best_info=$(get_best_info "RA") + best_metric=$(echo $best_info | awk -F " " '{print $1}') + best_ra_prob=$(echo $best_info | awk -F " " '{print $2}') + echo "The best ra_prob is ${best_ra_prob}, and the best metric is ${best_metric}" >> ${RESULT} +fi + +# Train and get best re_prob +best_re_prob=0.1 +if [[ ${MODE_MAP["search_re_prob"]} -eq 1 ]]; then + search_re_prob "${best_lr}" "${best_resolution}" "${best_ra_prob}" + best_info=$(get_best_info "RE") + best_metric=$(echo $best_info | awk -F " " '{print $1}') + best_re_prob=$(echo $best_info | awk -F " " '{print $2}') + echo "The best re_prob is ${best_re_prob}, and the best metric is ${best_metric}" >> ${RESULT} +fi + +# Train and get best lr_mult_list +best_lr_mult_list=[1.0,1.0,1.0,1.0,1.0,1.0] +if [[ ${MODE_MAP["search_lr_mult_list"]} -eq 1 ]]; then + search_lr_mult_list "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}" + best_info=$(get_best_info "LR_MULT") + best_metric=$(echo $best_info | awk -F " " '{print $1}') + best_lr_mult_list=$(echo $best_info | awk -F " " '{print $2}') + echo "The best lr_mult_list is ${best_lr_mult_list}, and the best metric is ${best_metric}" >> ${RESULT} +fi + +# train and get best teacher +best_teacher="ResNet101_vd" +if [[ ${MODE_MAP["search_teacher"]} -eq 1 ]]; then + search_teacher "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}" + best_info=$(get_best_info "TEACHER") + best_metric=$(echo $best_info | awk -F " " '{print $1}') + best_teacher=$(echo $best_info | awk -F " " '{print $2}') + echo "The best teacher is ${best_teacher}, and the best metric is ${best_metric}" >> ${RESULT} +fi + +# train the distillation model +if [[ ${MODE_MAP["train_distillation_model"]} -eq 1 ]]; then + train_distillation_model "${best_lr}" "${best_resolution}" "${best_ra_prob}" "${best_re_prob}" "${best_lr_mult_list}" ${best_teacher} + best_info=$(get_best_info "kd_teacher/DistillationModel") + best_metric=$(echo $best_info | awk -F " " '{print $1}') + echo "the distillation best metric is ${best_metric}, it is global best metric!" 
>> ${RESULT} +fi + diff --git a/tools/search_strategy.py b/tools/search_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..abc406167946c82604f2e58f3835d4a37bbb694d --- /dev/null +++ b/tools/search_strategy.py @@ -0,0 +1,141 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../'))) + +import subprocess +import numpy as np + +from ppcls.utils import config + + +def get_result(log_dir): + log_file = "{}/train.log".format(log_dir) + with open(log_file, "r") as f: + raw = f.read() + res = float(raw.split("best metric: ")[-1].split("]")[0]) + return res + + +def search_train(search_list, + base_program, + base_output_dir, + search_key, + config_replace_value, + model_name, + search_times=1): + best_res = 0. + best = search_list[0] + all_result = {} + for search_i in search_list: + program = base_program.copy() + for v in config_replace_value: + program += ["-o", "{}={}".format(v, search_i)] + if v == "Arch.name": + model_name = search_i + res_list = [] + for j in range(search_times): + output_dir = "{}/{}_{}_{}".format(base_output_dir, search_key, + search_i, j).replace(".", "_") + program += ["-o", "Global.output_dir={}".format(output_dir)] + process = subprocess.Popen(program) + process.communicate() + res = get_result("{}/{}".format(output_dir, model_name)) + res_list.append(res) + all_result[str(search_i)] = res_list + + if np.mean(res_list) > best_res: + best = search_i + best_res = np.mean(res_list) + all_result["best"] = best + return all_result + + +def search_strategy(): + args = config.parse_args() + configs = config.get_config( + args.config, overrides=args.override, show=False) + base_config_file = configs["base_config_file"] + distill_config_file = configs.get("distill_config_file", None) + model_name = config.get_config(base_config_file)["Arch"]["name"] + gpus = configs["gpus"] + gpus = ",".join([str(i) for i in gpus]) + base_program = [ + "python3.7", "-m", "paddle.distributed.launch", + "--gpus={}".format(gpus), "tools/train.py", "-c", base_config_file + ] + base_output_dir = configs["output_dir"] + search_times = configs["search_times"] + search_dict = configs.get("search_dict") + all_results = {} + for search_i in search_dict: + search_key = search_i["search_key"] + search_values = search_i["search_values"] + replace_config = search_i["replace_config"] + res = search_train(search_values, base_program, base_output_dir, + search_key, replace_config, model_name, + search_times) + all_results[search_key] = res + best = res.get("best") + for v in replace_config: + base_program += ["-o", "{}={}".format(v, best)] + + teacher_configs = configs.get("teacher", None) + if teacher_configs is None: + print(all_results, base_program) + return + + algo = teacher_configs.get("algorithm", "skl-ugi") + supported_list = ["skl-ugi", "udml"] + assert algo in supported_list, f"algorithm must be in {supported_list} but got {algo}" + if algo == "skl-ugi": + teacher_program = base_program.copy() + # remove incompatible keys + teacher_rm_keys = teacher_configs["rm_keys"] + rm_indices = [] + for rm_k in teacher_rm_keys: + for ind, ki in enumerate(base_program): + if rm_k in ki: + rm_indices.append(ind) + for rm_index in rm_indices[::-1]: + teacher_program.pop(rm_index) + teacher_program.pop(rm_index - 1) + replace_config = ["Arch.name"] + teacher_list = 
+        res = search_train(teacher_list, teacher_program, base_output_dir,
+                           "teacher", replace_config, model_name)
+        all_results["teacher"] = res
+        best = res.get("best")
+        t_pretrained = "{}/{}_{}_0/{}/best_model".format(base_output_dir,
+                                                         "teacher", best, best)
+        base_program += [
+            "-o", "Arch.models.0.Teacher.name={}".format(best), "-o",
+            "Arch.models.0.Teacher.pretrained={}".format(t_pretrained)
+        ]
+    elif algo == "udml":
+        if "lr_mult_list" in all_results:
+            base_program += [
+                "-o", "Arch.models.0.Teacher.lr_mult_list={}".format(
+                    all_results["lr_mult_list"]["best"])
+            ]
+
+    output_dir = "{}/search_res".format(base_output_dir)
+    base_program += ["-o", "Global.output_dir={}".format(output_dir)]
+    final_replace = configs.get('final_replace')
+    # swap the base config for the distillation config and apply any final
+    # string replacements before launching the last training run
+    for i in range(len(base_program)):
+        base_program[i] = base_program[i].replace(base_config_file,
+                                                  distill_config_file)
+        for k in final_replace:
+            v = final_replace[k]
+            base_program[i] = base_program[i].replace(k, v)
+
+    process = subprocess.Popen(base_program)
+    process.communicate()
+    print(all_results, base_program)
+
+
+if __name__ == '__main__':
+    search_strategy()
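+
+
+# Usage (illustrative; the YAML is an assumed search-space config providing
+# base_config_file, distill_config_file, gpus, output_dir, search_times,
+# search_dict and, optionally, teacher / final_replace):
+#   python3.7 tools/search_strategy.py -c ppcls/configs/StrategySearch/person.yaml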