From de4129baa63496820d93bbefb12e58786992ac27 Mon Sep 17 00:00:00 2001 From: Yang Nie Date: Fri, 3 Mar 2023 03:30:20 +0800 Subject: [PATCH] update --- docs/zh_CN/models/ImageNet1k/MobileViTv3.md | 108 ++++++++ docs/zh_CN/models/ImageNet1k/README.md | 21 +- ppcls/arch/backbone/__init__.py | 2 +- ppcls/arch/backbone/model_zoo/mobilevit_v3.py | 245 ++++++++++++++++-- .../ImageNet/MobileViTv3/MobileViTv3_S.yaml | 2 +- .../MobileViTv3/MobileViTv3_S_L2.yaml | 152 +++++++++++ .../ImageNet/MobileViTv3/MobileViTv3_XS.yaml | 2 +- .../MobileViTv3/MobileViTv3_XS_L2.yaml | 152 +++++++++++ .../ImageNet/MobileViTv3/MobileViTv3_XXS.yaml | 2 +- .../MobileViTv3/MobileViTv3_XXS_L2.yaml | 152 +++++++++++ .../MobileViTv3/MobileViTv3_x0_5.yaml | 62 +++-- .../MobileViTv3/MobileViTv3_x0_75.yaml | 176 +++++++++++++ .../MobileViTv3/MobileViTv3_x1_0.yaml | 176 +++++++++++++ .../MobileViTv3_S_train_infer_python.txt | 61 +++++ .../MobileViTv3_x1_0_train_infer_python.txt | 61 +++++ 15 files changed, 1331 insertions(+), 43 deletions(-) create mode 100644 docs/zh_CN/models/ImageNet1k/MobileViTv3.md create mode 100644 ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S_L2.yaml create mode 100644 ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS_L2.yaml create mode 100644 ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS_L2.yaml create mode 100644 ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_75.yaml create mode 100644 ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x1_0.yaml create mode 100644 test_tipc/configs/MobileViTv3/MobileViTv3_S_train_infer_python.txt create mode 100644 test_tipc/configs/MobileViTv3/MobileViTv3_x1_0_train_infer_python.txt diff --git a/docs/zh_CN/models/ImageNet1k/MobileViTv3.md b/docs/zh_CN/models/ImageNet1k/MobileViTv3.md new file mode 100644 index 00000000..179a35dc --- /dev/null +++ b/docs/zh_CN/models/ImageNet1k/MobileViTv3.md @@ -0,0 +1,108 @@ +# MobileviTv3 +----- + +## 目录 + +- [1. 模型介绍](#1) + - [1.1 模型简介](#1.1) + - [1.2 模型指标](#1.2) +- [2. 模型快速体验](#2) +- [3. 模型训练、评估和预测](#3) +- [4. 模型推理部署](#4) + - [4.1 推理模型准备](#4.1) + - [4.2 基于 Python 预测引擎推理](#4.2) + - [4.3 基于 C++ 预测引擎推理](#4.3) + - [4.4 服务化部署](#4.4) + - [4.5 端侧部署](#4.5) + - [4.6 Paddle2ONNX 模型转换与预测](#4.6) + + + +## 1. 模型介绍 + + + +### 1.1 模型简介 + +MobileViTv3 是一个结合 CNN 和 ViT 的轻量级模型,用于移动视觉任务。通过 MobileViTv3-block 解决了 MobileViTv1 的扩展问题并简化了学习任务,从而得倒了 MobileViTv3-XXS、XS 和 S 模型,在 ImageNet-1k、ADE20K、COCO 和 PascalVOC2012 数据集上表现优于 MobileViTv1。 +通过将提出的融合块添加到 MobileViTv2 中,创建 MobileViTv3-0.5、0.75 和 1.0 模型,在ImageNet-1k、ADE20K、COCO和PascalVOC2012数据集上给出了比 MobileViTv2 更好的准确性数据。[论文地址](https://arxiv.org/abs/2209.15159)。 + + + +### 1.2 模型指标 + +| Models | Top1 | Top5 | Reference
<br>top1 | Reference<br>top5 | FLOPs<br>(G) | Params<br>
(M) | +|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +| MobileViTv3_XXS | 0.7087 | 0.8976 | 0.7098 | - | 289.02 | 1.25 | +| MobileViTv3_XS | 0.7663 | 0.9332 | 0.7671 | - | 926.98 | 2.49 | +| MobileViTv3_S | 0.7928 | 0.9454 | 0.7930 | - | 1841.39 | 5.76 | +| MobileViTv3_XXS_L2 | 0.7028 | 0.8942 | 0.7023 | - | 256.97 | 1.15 | +| MobileViTv3_XS_L2 | 0.7607 | 0.9300 | 0.7610 | - | 852.82 | 2.26 | +| MobileViTv3_S_L2 | 0.7907 | 0.9440 | 0.7906 | - | 1651.96 | 5.17 | +| MobileViTv3_x0_5 | 0.7200 | 0.9083 | 0.7233 | - | 481.33 | 1.43 | +| MobileViTv3_x0_75 | 0.7626 | 0.9308 | 0.7655 | - | 1064.48 | 3.00 | +| MobileViTv3_x1_0 | 0.7838 | 0.9421 | 0.7864 | - | 1875.96 | 5.14 | + +**备注:** PaddleClas 所提供的该系列模型的预训练模型权重,均是基于其官方提供的权重转得。 + + + +## 2. 模型快速体验 + +安装 paddlepaddle 和 paddleclas 即可快速对图片进行预测,体验方法可以参考[ResNet50 模型快速体验](./ResNet.md#2)。 + + + +## 3. 模型训练、评估和预测 + +此部分内容包括训练环境配置、ImageNet数据的准备、该模型在 ImageNet 上的训练、评估、预测等内容。在 `ppcls/configs/ImageNet/MobileViTv3/` 中提供了该模型的训练配置,启动训练方法可以参考:[ResNet50 模型训练、评估和预测](./ResNet.md#3-模型训练评估和预测)。 + +**备注:** 由于 MobileViT 系列模型默认使用的 GPU 数量为 8 个,所以在训练时,需要指定8个GPU,如`python3 -m paddle.distributed.launch --gpus="0,1,2,3,4,5,6,7" tools/train.py -c xxx.yaml`, 如果使用 4 个 GPU 训练,默认学习率需要减小一半,精度可能有损。 + + + +## 4. 模型推理部署 + + + +### 4.1 推理模型准备 + +Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。相比于直接基于预训练模型进行预测,Paddle Inference可使用 MKLDNN、CUDNN、TensorRT 进行预测加速,从而实现更优的推理性能。更多关于Paddle Inference推理引擎的介绍,可以参考[Paddle Inference官网教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/infer/inference/inference_cn.html)。 + +Inference 的获取可以参考 [ResNet50 推理模型准备](./ResNet.md#4.1) 。 + + + +### 4.2 基于 Python 预测引擎推理 + +PaddleClas 提供了基于 python 预测引擎推理的示例。您可以参考[ResNet50 基于 Python 预测引擎推理](./ResNet.md#4.2) 完成模型的推理预测。 + + + +### 4.3 基于 C++ 预测引擎推理 + +PaddleClas 提供了基于 C++ 预测引擎推理的示例,您可以参考[服务器端 C++ 预测](../../deployment/image_classification/cpp/linux.md)来完成相应的推理部署。如果您使用的是 Windows 平台,可以参考[基于 Visual Studio 2019 Community CMake 编译指南](../../deployment/image_classification/cpp/windows.md)完成相应的预测库编译和模型预测工作。 + + + +### 4.4 服务化部署 + +Paddle Serving 提供高性能、灵活易用的工业级在线推理服务。Paddle Serving 支持 RESTful、gRPC、bRPC 等多种协议,提供多种异构硬件和多种操作系统环境下推理解决方案。更多关于Paddle Serving 的介绍,可以参考[Paddle Serving 代码仓库](https://github.com/PaddlePaddle/Serving)。 + +PaddleClas 提供了基于 Paddle Serving 来完成模型服务化部署的示例,您可以参考[模型服务化部署](../../deployment/image_classification/paddle_serving.md)来完成相应的部署工作。 + + + +### 4.5 端侧部署 + +Paddle Lite 是一个高性能、轻量级、灵活性强且易于扩展的深度学习推理框架,定位于支持包括移动端、嵌入式以及服务器端在内的多硬件平台。更多关于 Paddle Lite 的介绍,可以参考[Paddle Lite 代码仓库](https://github.com/PaddlePaddle/Paddle-Lite)。 + +PaddleClas 提供了基于 Paddle Lite 来完成模型端侧部署的示例,您可以参考[端侧部署](../../deployment/image_classification/paddle_lite.md)来完成相应的部署工作。 + + + +### 4.6 Paddle2ONNX 模型转换与预测 + +Paddle2ONNX 支持将 PaddlePaddle 模型格式转化到 ONNX 模型格式。通过 ONNX 可以完成将 Paddle 模型到多种推理引擎的部署,包括TensorRT/OpenVINO/MNN/TNN/NCNN,以及其它对 ONNX 开源格式进行支持的推理引擎或硬件。更多关于 Paddle2ONNX 的介绍,可以参考[Paddle2ONNX 代码仓库](https://github.com/PaddlePaddle/Paddle2ONNX)。 + +PaddleClas 提供了基于 Paddle2ONNX 来完成 inference 模型转换 ONNX 模型并作推理预测的示例,您可以参考[Paddle2ONNX 模型转换与预测](../../deployment/image_classification/paddle2onnx.md)来完成相应的部署工作。 diff --git a/docs/zh_CN/models/ImageNet1k/README.md b/docs/zh_CN/models/ImageNet1k/README.md index 20cf596c..20965e34 100644 --- a/docs/zh_CN/models/ImageNet1k/README.md +++ b/docs/zh_CN/models/ImageNet1k/README.md @@ -796,15 +796,24 @@ DeiT(Data-efficient Image Transformers)系列模型的精度、速度指标 -## MobileViT 系列 [[42](#ref42)] +## MobileViT 系列 [[42](#ref42)][[51](#ref51)] -关于 MobileViT 系列模型的精度、速度指标如下表所示,更多介绍可以参考:[MobileViT 系列模型文档](MobileViT.md)。 +关于 MobileViT 
系列模型的精度、速度指标如下表所示，更多介绍可以参考：[MobileViT 系列模型文档](MobileViT.md)、[MobileViTv3 系列模型文档](MobileViTv3.md)。

| 模型 | Top-1 Acc | Top-5 Acc | time(ms)<br>bs=1 | time(ms)<br>bs=4 | time(ms)<br>
bs=8 | FLOPs(M) | Params(M) | 预训练模型下载地址 | inference模型下载地址 | | ---------- | --------- | --------- | ---------------- | ---------------- | -------- | --------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| MobileViT_XXS | 0.6867 | 0.8878 | - | - | - | 337.24 | 1.28 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XXS_infer.tar) | -| MobileViT_XS | 0.7454 | 0.9227 | - | - | - | 930.75 | 2.33 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XS_infer.tar) | -| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 1849.35 | 5.59 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) | +| MobileViT_XXS | 0.6867 | 0.8878 | - | - | - | 337.24 | 1.28 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XXS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XXS_infer.tar) | +| MobileViT_XS | 0.7454 | 0.9227 | - | - | - | 930.75 | 2.33 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_XS_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_XS_infer.tar) | +| MobileViT_S | 0.7814 | 0.9413 | - | - | - | 1849.35 | 5.59 | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileViT_S_pretrained.pdparams) | [下载链接](https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileViT_S_infer.tar) | +| MobileViTv3_XXS | 0.7087 | 0.8976 | - | - | - | 289.02 | 1.25 | [下载链接]() | [下载链接]() | +| MobileViTv3_XS | 0.7663 | 0.9332 | - | - | - | 926.98 | 2.49 | [下载链接]() | [下载链接]() | +| MobileViTv3_S | 0.7928 | 0.9454 | - | - | - | 1841.39 | 5.76 | [下载链接]() | [下载链接]() | +| MobileViTv3_XXS_L2 | 0.7028 | 0.8942 | - | - | - | 256.97 | 1.15 | [下载链接]() | [下载链接]() | +| MobileViTv3_XS_L2 | 0.7607 | 0.9300 | - | - | - | 852.82 | 2.26 | [下载链接]() | [下载链接]() | +| MobileViTv3_S_L2 | 0.7907 | 0.9440 | - | - | - | 1651.96 | 5.17 | [下载链接]() | [下载链接]() | +| MobileViTv3_x0_5 | 0.7200 | 0.9083 | - | - | - | 481.33 | 1.43 | [下载链接]() | [下载链接]() | +| MobileViTv3_x0_75 | 0.7626 | 0.9308 | - | - | - | 1064.48 | 3.00 | [下载链接]() | [下载链接]() | +| MobileViTv3_x1_0 | 0.7838 | 0.9421 | - | - | - | 1875.96 | 5.14 | [下载链接]() | [下载链接]() | @@ -910,3 +919,5 @@ TRANSFORMERS FOR IMAGE RECOGNITION AT SCALE. [49]Mingyuan Mao, Renrui Zhang, Honghui Zheng, Peng Gao, Teli Ma, Yan Peng, Errui Ding, Baochang Zhang, Shumin Han. Dual-stream Network for Visual Recognition. [50]Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. Swin Transformer V2: Scaling Up Capacity and Resolution + +[51]Wadekar, Shakti N. and Chaurasia, Abhishek. 
MobileViTv3: Mobile-Friendly Vision Transformer with Simple and Effective Fusion of Local, Global and Input Features diff --git a/ppcls/arch/backbone/__init__.py b/ppcls/arch/backbone/__init__.py index 542cac55..95b0f7e3 100644 --- a/ppcls/arch/backbone/__init__.py +++ b/ppcls/arch/backbone/__init__.py @@ -78,7 +78,7 @@ from .model_zoo.cae import cae_base_patch16_224, cae_large_patch16_224 from .model_zoo.cvt import CvT_13_224, CvT_13_384, CvT_21_224, CvT_21_384, CvT_W24_384 from .model_zoo.micronet import MicroNet_M0, MicroNet_M1, MicroNet_M2, MicroNet_M3 from .model_zoo.mobilenext import MobileNeXt_x0_35, MobileNeXt_x0_5, MobileNeXt_x0_75, MobileNeXt_x1_0, MobileNeXt_x1_4 -from .model_zoo.mobilevit_v3 import MobileViTv3_XXS, MobileViTv3_XS, MobileViTv3_S, MobileViTv3_x0_5, MobileViTv3_x0_75, MobileViTv3_x1_0 +from .model_zoo.mobilevit_v3 import MobileViTv3_XXS, MobileViTv3_XS, MobileViTv3_S, MobileViTv3_XXS_L2, MobileViTv3_XS_L2, MobileViTv3_S_L2, MobileViTv3_x0_5, MobileViTv3_x0_75, MobileViTv3_x1_0 from .variant_models.resnet_variant import ResNet50_last_stage_stride1 from .variant_models.resnet_variant import ResNet50_adaptive_max_pool2d diff --git a/ppcls/arch/backbone/model_zoo/mobilevit_v3.py b/ppcls/arch/backbone/model_zoo/mobilevit_v3.py index 2efb4250..61b2d845 100644 --- a/ppcls/arch/backbone/model_zoo/mobilevit_v3.py +++ b/ppcls/arch/backbone/model_zoo/mobilevit_v3.py @@ -29,6 +29,9 @@ MODEL_URLS = { "MobileViTv3_XXS": "", "MobileViTv3_XS": "", "MobileViTv3_S": "", + "MobileViTv3_XXS_L2": "", + "MobileViTv3_XS_L2": "", + "MobileViTv3_S_L2": "", "MobileViTv3_x0_5": "", "MobileViTv3_x0_75": "", "MobileViTv3_x1_0": "", @@ -319,7 +322,7 @@ class MobileViTv3Block(nn.Layer): transposed_fm = reshaped_fm.transpose([0, 3, 2, 1]) # [B, P, N, C] --> [BP, N, C] patches = transposed_fm.reshape( - [batch_size * patch_area, num_patches, -1]) + [batch_size * patch_area, num_patches, in_channels]) info_dict = { "orig_size": (orig_h, orig_w), @@ -339,7 +342,7 @@ class MobileViTv3Block(nn.Layer): # [BP, N, C] --> [B, P, N, C] patches = patches.reshape([ info_dict["batch_size"], self.patch_area, - info_dict["total_patches"], -1 + info_dict["total_patches"], patches.shape[2] ]) batch_size, pixels, num_patches, channels = patches.shape @@ -399,6 +402,7 @@ class MobileViTv3Block(nn.Layer): class LinearSelfAttention(nn.Layer): def __init__(self, embed_dim, attn_dropout=0.0, bias=True): super().__init__() + self.embed_dim = embed_dim self.qkv_proj = nn.Conv2D( embed_dim, 1 + (2 * embed_dim), 1, bias_attr=bias) self.attn_dropout = nn.Dropout(p=attn_dropout) @@ -428,7 +432,7 @@ class LinearSelfAttention(nn.Layer): # combine context vector with values # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] - out = F.relu(value) * context_vector.expand_as(value) + out = F.relu(value) * context_vector out = self.out_proj(out) return out @@ -552,10 +556,11 @@ class MobileViTv3Block_v2(nn.Layer): # [B, C, H, W] --> [B, C, P, N] patches = F.unfold( feature_map, - kernel_sizes=(self.patch_h, self.patch_w), - stride=(self.patch_h, self.patch_w)) + kernel_sizes=[self.patch_h, self.patch_w], + strides=[self.patch_h, self.patch_w]) + n_patches = img_h * img_w // (self.patch_h * self.patch_w) patches = patches.reshape( - [batch_size, in_channels, self.patch_h * self.patch_w, -1]) + [batch_size, in_channels, self.patch_h * self.patch_w, n_patches]) return patches, (img_h, img_w) @@ -567,9 +572,9 @@ class MobileViTv3Block_v2(nn.Layer): feature_map = F.fold( patches, - output_sizes=output_size, - kernel_sizes=(self.patch_h, 
self.patch_w), - stride=(self.patch_h, self.patch_w)) + output_size, + kernel_sizes=[self.patch_h, self.patch_w], + strides=[self.patch_h, self.patch_w]) return feature_map @@ -656,16 +661,16 @@ class MobileViTv3(nn.Layer): cfg=mobilevit_config["layer5"], dilate=dilate_l5) - in_channels = out_channels - exp_channels = min(mobilevit_config["last_layer_exp_factor"] * - in_channels, 960) if self.mobilevit_v2_based: self.conv_1x1_exp = nn.Identity() else: + in_channels = out_channels + out_channels = min(mobilevit_config["last_layer_exp_factor"] * + in_channels, 960) self.conv_1x1_exp = nn.Sequential( ('conv', nn.Conv2D( - in_channels, exp_channels, 1, bias_attr=False)), - ('norm', nn.BatchNorm2D(exp_channels)), ('act', nn.Silu())) + in_channels, out_channels, 1, bias_attr=False)), + ('norm', nn.BatchNorm2D(out_channels)), ('act', nn.Silu())) self.classifier = nn.Sequential() self.classifier.add_sublayer( @@ -675,7 +680,7 @@ class MobileViTv3(nn.Layer): self.classifier.add_sublayer( name="dropout", sublayer=nn.Dropout(p=classifier_dropout)) self.classifier.add_sublayer( - name="fc", sublayer=nn.Linear(exp_channels, class_num)) + name="fc", sublayer=nn.Linear(out_channels, class_num)) # weight initialization self.apply(self._init_weights) @@ -1022,6 +1027,216 @@ def MobileViTv3_XXS(pretrained=False, use_ssld=False, **kwargs): return model +def MobileViTv3_S_L2(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 4 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 32, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 64, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 128, + "transformer_channels": 144, + "ffn_dim": 288, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 256, + "transformer_channels": 192, + "ffn_dim": 384, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 320, + "transformer_channels": 240, + "ffn_dim": 480, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTv3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTv3_S_L2"], use_ssld=use_ssld) + return model + + +def MobileViTv3_XS_L2(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 4 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 32, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 48, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 96, + "transformer_channels": 96, + "ffn_dim": 192, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 160, + 
"transformer_channels": 120, + "ffn_dim": 240, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 160, + "transformer_channels": 144, + "ffn_dim": 288, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTv3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTv3_XS_L2"], use_ssld=use_ssld) + return model + + +def MobileViTv3_XXS_L2(pretrained=False, use_ssld=False, **kwargs): + mv2_exp_mult = 2 + mobilevit_config = { + "layer0": { + "img_channels": 3, + "out_channels": 16, + }, + "layer1": { + "out_channels": 16, + "expand_ratio": mv2_exp_mult, + "num_blocks": 1, + "stride": 1, + "block_type": "mv2" + }, + "layer2": { + "out_channels": 24, + "expand_ratio": mv2_exp_mult, + "num_blocks": 3, + "stride": 2, + "block_type": "mv2" + }, + "layer3": { # 28x28 + "out_channels": 64, + "transformer_channels": 64, + "ffn_dim": 128, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer4": { # 14x14 + "out_channels": 80, + "transformer_channels": 80, + "ffn_dim": 160, + "transformer_blocks": 2, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "layer5": { # 7x7 + "out_channels": 128, + "transformer_channels": 96, + "ffn_dim": 192, + "transformer_blocks": 3, + "patch_h": 2, + "patch_w": 2, + "stride": 2, + "mv_expand_ratio": mv2_exp_mult, + "head_dim": None, + "num_heads": 4, + "block_type": "mobilevit" + }, + "last_layer_exp_factor": 4 + } + + model = MobileViTv3(mobilevit_config, **kwargs) + + _load_pretrained( + pretrained, model, MODEL_URLS["MobileViTv3_XXS_L2"], use_ssld=use_ssld) + return model + + def MobileViTv3_x1_0(pretrained=False, use_ssld=False, **kwargs): mobilevit_config = { "layer0": { diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml index ffeff778..dee26015 100644 --- a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml @@ -99,7 +99,7 @@ DataLoader: cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: - DecodeImage: - to_rgb: False + to_rgb: True channel_first: False - ResizeImage: resize_short: 288 diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S_L2.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S_L2.yaml new file mode 100644 index 00000000..06e0037b --- /dev/null +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S_L2.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + scale_loss: 65536 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTv3_S_L2 + 
class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + # by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS.yaml index f6887ba4..f803349c 100644 --- a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS.yaml +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS.yaml @@ -99,7 +99,7 @@ DataLoader: cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: - DecodeImage: - to_rgb: False + to_rgb: True channel_first: False - ResizeImage: resize_short: 288 diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS_L2.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS_L2.yaml new file mode 100644 index 00000000..5900f17f --- /dev/null +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XS_L2.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + 
use_dali: False + +# mixed precision training +AMP: + scale_loss: 65536 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTv3_XS_L2 + class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + # by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS.yaml index 3b1ab037..3cd05297 100644 --- a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS.yaml +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS.yaml @@ -99,7 +99,7 @@ DataLoader: cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: - DecodeImage: - to_rgb: False + to_rgb: True channel_first: False - ResizeImage: resize_short: 288 diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS_L2.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS_L2.yaml new file mode 100644 index 00000000..09767a1f --- /dev/null +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_XXS_L2.yaml @@ -0,0 +1,152 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + 
device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + scale_loss: 65536 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTv3_XXS_L2 + class_num: 1000 + dropout: 0.1 + +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.01 + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 1 # 3000 iterations + warmup_start_lr: 0.0002 + # by_epoch: True + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: MultiScaleDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - RandCropImage: + size: 256 + interpolation: bilinear + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + # support to specify width and height respectively: + # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + sampler: + name: MultiScaleSampler + scales: [256, 160, 192, 224, 288, 320] + # first_bs: batch size for the first image resolution in the scales list + # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple + first_bs: 48 + divided_factor: 32 + is_training: True + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 48 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_rgb: True + channel_first: False + - ResizeImage: + resize_short: 288 + interpolation: bilinear + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_5.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_5.yaml index 44de4fb6..a9e783fb 100644 --- a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_5.yaml +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_5.yaml @@ -47,48 +47,68 @@ Optimizer: beta1: 0.9 beta2: 0.999 epsilon: 1e-8 - weight_decay: 0.01 + weight_decay: 0.05 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True lr: # for 8 cards name: Cosine learning_rate: 0.002 eta_min: 0.0002 - warmup_epoch: 1 # 3000 iterations - warmup_start_lr: 0.0002 + 
warmup_epoch: 20 # 20000 iterations + warmup_start_lr: 1e-6 # by_epoch: True + clip_norm: 10 # data loader for train and eval DataLoader: Train: dataset: - name: MultiScaleDataset + name: ImageNetDataset image_root: ./dataset/ILSVRC2012/ cls_label_path: ./dataset/ILSVRC2012/train_list.txt transform_ops: - DecodeImage: to_rgb: True channel_first: False + backend: pil - RandCropImage: size: 256 - interpolation: bilinear + interpolation: bicubic + backend: pil use_log_aspect: True - RandFlipImage: flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 - NormalizeImage: scale: 1.0/255.0 mean: [0.0, 0.0, 0.0] std: [1.0, 1.0, 1.0] order: '' - # support to specify width and height respectively: - # scales: [(256,256) (160,160), (192,192), (224,224) (288,288) (320,320)] + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 sampler: - name: MultiScaleSampler - scales: [256, 160, 192, 224, 288, 320] - # first_bs: batch size for the first image resolution in the scales list - # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple - first_bs: 48 - divided_factor: 32 - is_training: True + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False loader: num_workers: 4 use_shared_memory: True @@ -99,11 +119,13 @@ DataLoader: cls_label_path: ./dataset/ILSVRC2012/val_list.txt transform_ops: - DecodeImage: - to_rgb: False + to_np: False channel_first: False + backend: pil - ResizeImage: resize_short: 288 - interpolation: bilinear + interpolation: bicubic + backend: pil - CropImage: size: 256 - NormalizeImage: @@ -113,7 +135,7 @@ DataLoader: order: '' sampler: name: DistributedBatchSampler - batch_size: 48 + batch_size: 128 drop_last: False shuffle: False loader: @@ -125,11 +147,13 @@ Infer: batch_size: 10 transforms: - DecodeImage: - to_rgb: True + to_np: False channel_first: False + backend: pil - ResizeImage: resize_short: 288 - interpolation: bilinear + interpolation: bicubic + backend: pil - CropImage: size: 256 - NormalizeImage: diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_75.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_75.yaml new file mode 100644 index 00000000..eeadb179 --- /dev/null +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x0_75.yaml @@ -0,0 +1,176 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + scale_loss: 65536 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTv3_x0_75 + class_num: 1000 + classifier_dropout: 0. 
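+  # editor's note (not in the upstream patch): the v2-based configs here
+  # (x0_75, x1_0) set `classifier_dropout: 0.`, while the v1-based *_L2
+  # configs above pass `dropout: 0.1` to the Arch instead.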
+ +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 20 # 20000 iterations + warmup_start_lr: 1e-6 + # by_epoch: True + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x1_0.yaml b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x1_0.yaml new file mode 100644 index 00000000..9f49e9de --- /dev/null +++ b/ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x1_0.yaml @@ -0,0 +1,176 @@ +# global configs +Global: + checkpoints: null + pretrained_model: null + output_dir: ./output/ + device: gpu + save_interval: 1 + eval_during_train: True + eval_interval: 1 + epochs: 300 + print_batch_step: 10 + use_visualdl: False + # used for static mode and model export + image_shape: [3, 256, 256] + save_inference_dir: ./inference + use_dali: False + +# mixed precision training +AMP: + scale_loss: 65536 + use_dynamic_loss_scaling: True + # O1: mixed fp16 + level: O1 + +# model ema +EMA: + decay: 0.9995 + +# model architecture +Arch: + name: MobileViTv3_x1_0 + class_num: 1000 + classifier_dropout: 0. 
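+  # editor's note (not in the upstream patch): for the v2-based variants the
+  # backbone's final 1x1 expansion conv is an nn.Identity() (see the
+  # MobileViTv3.__init__ hunk above), so the classifier consumes layer5's
+  # out_channels directly.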
+ +# loss function config for traing/eval process +Loss: + Train: + - CELoss: + weight: 1.0 + epsilon: 0.1 + Eval: + - CELoss: + weight: 1.0 + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.999 + epsilon: 1e-8 + weight_decay: 0.05 + no_weight_decay_name: .bias norm + one_dim_param_no_weight_decay: True + lr: + # for 8 cards + name: Cosine + learning_rate: 0.002 + eta_min: 0.0002 + warmup_epoch: 20 # 20000 iterations + warmup_start_lr: 1e-6 + # by_epoch: True + clip_norm: 10 + +# data loader for train and eval +DataLoader: + Train: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/train_list.txt + transform_ops: + - DecodeImage: + to_rgb: True + channel_first: False + backend: pil + - RandCropImage: + size: 256 + interpolation: bicubic + backend: pil + use_log_aspect: True + - RandFlipImage: + flip_code: 1 + - TimmAutoAugment: + config_str: rand-m9-mstd0.5-inc1 + interpolation: bicubic + img_size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - RandomErasing: + EPSILON: 0.25 + sl: 0.02 + sh: 1.0/3.0 + r1: 0.3 + attempt: 10 + use_log_aspect: True + mode: pixel + batch_transform_ops: + - OpSampler: + MixupOperator: + alpha: 0.2 + prob: 0.5 + CutmixOperator: + alpha: 1.0 + prob: 0.5 + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + Eval: + dataset: + name: ImageNetDataset + image_root: ./dataset/ILSVRC2012/ + cls_label_path: ./dataset/ILSVRC2012/val_list.txt + transform_ops: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + sampler: + name: DistributedBatchSampler + batch_size: 128 + drop_last: False + shuffle: False + loader: + num_workers: 4 + use_shared_memory: True + +Infer: + infer_imgs: docs/images/inference_deployment/whl_demo.jpg + batch_size: 10 + transforms: + - DecodeImage: + to_np: False + channel_first: False + backend: pil + - ResizeImage: + resize_short: 288 + interpolation: bicubic + backend: pil + - CropImage: + size: 256 + - NormalizeImage: + scale: 1.0/255.0 + mean: [0.0, 0.0, 0.0] + std: [1.0, 1.0, 1.0] + order: '' + - ToCHWImage: + PostProcess: + name: Topk + topk: 5 + class_id_map_file: ppcls/utils/imagenet1k_label_list.txt + +Metric: + Train: + - TopkAcc: + topk: [1, 5] + Eval: + - TopkAcc: + topk: [1, 5] diff --git a/test_tipc/configs/MobileViTv3/MobileViTv3_S_train_infer_python.txt b/test_tipc/configs/MobileViTv3/MobileViTv3_S_train_infer_python.txt new file mode 100644 index 00000000..cbf1761d --- /dev/null +++ b/test_tipc/configs/MobileViTv3/MobileViTv3_S_train_infer_python.txt @@ -0,0 +1,61 @@ +===========================train_params=========================== +model_name:MobileViTv3_S +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.first_bs:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o 
DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_S.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:null +export2:null +inference_dir:null +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=288 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128 +fp_items:fp32 +epoch:1 +model_type:norm_train +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,256,256]}] diff --git a/test_tipc/configs/MobileViTv3/MobileViTv3_x1_0_train_infer_python.txt b/test_tipc/configs/MobileViTv3/MobileViTv3_x1_0_train_infer_python.txt new file mode 100644 index 00000000..d28792e7 --- /dev/null +++ b/test_tipc/configs/MobileViTv3/MobileViTv3_x1_0_train_infer_python.txt @@ -0,0 +1,61 @@ +===========================train_params=========================== +model_name:MobileViTv3_x1_0 +python:python3.7 +gpu_list:0|0,1 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False -o Global.print_batch_step=1 -o Global.eval_during_train=False -o Global.save_interval=2 +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileViTv3/MobileViTv3_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null 
+kl_quant:null +export2:null +inference_dir:null +infer_model:../inference/ +infer_export:True +infer_quant:Fasle +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=288 -o PreProcess.transform_ops.1.CropImage.size=256 -o PreProcess.transform_ops.2.NormalizeImage.mean=[0.,0.,0.] -o PreProcess.transform_ops.2.NormalizeImage.std=[1.,1.,1.] +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128 +fp_items:fp32 +epoch:1 +model_type:norm_train +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,256,256]}] -- GitLab
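Editor's appendix (not part of the upstream patch): two short, hedged sketches for readers trying this change out. The first is a smoke test for the newly registered backbones; it assumes a PaddleClas checkout with this patch applied is importable, that paddlepaddle is installed, and that the builders keep the configs' default `class_num` of 1000 — the builder names themselves come from the `ppcls/arch/backbone/__init__.py` hunk above.

```python
# Smoke test for the MobileViTv3 backbones added by this patch (a sketch,
# assuming the patched PaddleClas repo root is on PYTHONPATH).
import paddle

from ppcls.arch.backbone.model_zoo.mobilevit_v3 import (MobileViTv3_S_L2,
                                                        MobileViTv3_x1_0)

# The new configs train, evaluate, and export with image_shape [3, 256, 256].
x = paddle.randn([1, 3, 256, 256])

for build in (MobileViTv3_S_L2, MobileViTv3_x1_0):
    # The MODEL_URLS entries are still empty strings, so keep pretrained=False.
    model = build(pretrained=False)
    model.eval()
    with paddle.no_grad():
        y = model(x)
    print(build.__name__, y.shape)  # expected: [1, 1000]
```

The second sketch mirrors the patched `LinearSelfAttention.forward` at the shape level, since the change there (`F.relu(value) * context_vector`, dropping `expand_as`) is easy to miss: a single score channel is softmaxed over the N patches and used to pool `key` into a per-pixel context vector, which then gates `value` via plain broadcasting — O(N) cost rather than the O(N²) of standard attention. All sizes below are illustrative, and the sketch omits the module's attention dropout and output projection.

```python
# Shape-level sketch of separable (linear) self-attention, not the exact module.
import paddle
import paddle.nn.functional as F

B, d, P, N = 2, 64, 4, 64                  # batch, embed_dim, pixels/patch, patches
qkv = paddle.randn([B, 1 + 2 * d, P, N])   # 1x1 qkv_proj output: [query | key | value]

query, key, value = paddle.split(qkv, [1, d, d], axis=1)
scores = F.softmax(query, axis=-1)                   # [B, 1, P, N], weights over patches
context = (key * scores).sum(axis=-1, keepdim=True)  # [B, d, P, 1], pooled context
out = F.relu(value) * context                        # [B, d, P, N], broadcast over N
print(out.shape)
```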