From 5c39dfa6ba597f97df521560a823f91d5c70b43f Mon Sep 17 00:00:00 2001
From: gaotingquan
Date: Fri, 24 Mar 2023 09:12:44 +0000
Subject: [PATCH] rename gvt.py -> twins.py & twins-svt -> twins-alt-gvt

---
 docs/en/models/Twins_en.md                         | 2 +-
 docs/zh_CN/models/ImageNet1k/Twins.md              | 2 +-
 ppcls/arch/backbone/__init__.py                    | 2 +-
 ppcls/arch/backbone/model_zoo/{gvt.py => twins.py} | 0
 4 files changed, 3 insertions(+), 3 deletions(-)
 rename ppcls/arch/backbone/model_zoo/{gvt.py => twins.py} (100%)

diff --git a/docs/en/models/Twins_en.md b/docs/en/models/Twins_en.md
index 0096066e..0a9dede0 100644
--- a/docs/en/models/Twins_en.md
+++ b/docs/en/models/Twins_en.md
@@ -7,7 +7,7 @@
 
 ## 1. Overview
 
-The Twins network includes Twins-PCPVT and Twins-SVT, which focuses on the meticulous design of the spatial attention mechanism, resulting in a simple but more effective solution. Since the architecture only involves matrix multiplication, and the current deep learning framework has a high degree of optimization for matrix multiplication, the architecture is very efficient and easy to implement. Moreover, this architecture can achieve excellent performance in a variety of downstream vision tasks such as image classification, target detection, and semantic segmentation. [Paper](https://arxiv.org/abs/2104.13840).
+The Twins network includes Twins-PCPVT and Twins-ALT-GVT, which focuses on the meticulous design of the spatial attention mechanism, resulting in a simple but more effective solution. Since the architecture only involves matrix multiplication, and the current deep learning framework has a high degree of optimization for matrix multiplication, the architecture is very efficient and easy to implement. Moreover, this architecture can achieve excellent performance in a variety of downstream vision tasks such as image classification, target detection, and semantic segmentation. [Paper](https://arxiv.org/abs/2104.13840).
 
 ## 2. Accuracy, FLOPs and Parameters

diff --git a/docs/zh_CN/models/ImageNet1k/Twins.md b/docs/zh_CN/models/ImageNet1k/Twins.md
index 728a2208..d35ef8f4 100644
--- a/docs/zh_CN/models/ImageNet1k/Twins.md
+++ b/docs/zh_CN/models/ImageNet1k/Twins.md
@@ -26,7 +26,7 @@
 
 ### 1.1 模型简介
 
-Twins 网络包括 Twins-PCPVT 和 Twins-SVT,其重点对空间注意力机制进行了精心设计,得到了简单却更为有效的方案。由于该体系结构仅涉及矩阵乘法,而目前的深度学习框架中对矩阵乘法有较高的优化程度,因此该体系结构十分高效且易于实现。并且,该体系结构在图像分类、目标检测和语义分割等多种下游视觉任务中都能够取得优异的性能。[论文地址](https://arxiv.org/abs/2104.13840)。
+Twins 网络包括 Twins-PCPVT 和 Twins-ALT-GVT,其重点对空间注意力机制进行了精心设计,得到了简单却更为有效的方案。由于该体系结构仅涉及矩阵乘法,而目前的深度学习框架中对矩阵乘法有较高的优化程度,因此该体系结构十分高效且易于实现。并且,该体系结构在图像分类、目标检测和语义分割等多种下游视觉任务中都能够取得优异的性能。[论文地址](https://arxiv.org/abs/2104.13840)。

diff --git a/ppcls/arch/backbone/__init__.py b/ppcls/arch/backbone/__init__.py
index 78e9b4dc..ee4afdf3 100644
--- a/ppcls/arch/backbone/__init__.py
+++ b/ppcls/arch/backbone/__init__.py
@@ -59,7 +59,7 @@ from .model_zoo.swin_transformer_v2 import SwinTransformerV2_tiny_patch4_window8
 from .model_zoo.cswin_transformer import CSWinTransformer_tiny_224, CSWinTransformer_small_224, CSWinTransformer_base_224, CSWinTransformer_large_224, CSWinTransformer_base_384, CSWinTransformer_large_384
 from .model_zoo.mixnet import MixNet_S, MixNet_M, MixNet_L
 from .model_zoo.rexnet import ReXNet_1_0, ReXNet_1_3, ReXNet_1_5, ReXNet_2_0, ReXNet_3_0
-from .model_zoo.gvt import pcpvt_small, pcpvt_base, pcpvt_large, alt_gvt_small, alt_gvt_base, alt_gvt_large
+from .model_zoo.twins import pcpvt_small, pcpvt_base, pcpvt_large, alt_gvt_small, alt_gvt_base, alt_gvt_large
 from .model_zoo.levit import LeViT_128S, LeViT_128, LeViT_192, LeViT_256, LeViT_384
 from .model_zoo.dla import DLA34, DLA46_c, DLA46x_c, DLA60, DLA60x, DLA60x_c, DLA102, DLA102x, DLA102x2, DLA169
 from .model_zoo.rednet import RedNet26, RedNet38, RedNet50, RedNet101, RedNet152

diff --git a/ppcls/arch/backbone/model_zoo/gvt.py b/ppcls/arch/backbone/model_zoo/twins.py
similarity index 100%
rename from ppcls/arch/backbone/model_zoo/gvt.py
rename to ppcls/arch/backbone/model_zoo/twins.py
--
GitLab
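
Note: the sketch below is not part of the patch. It is a minimal example of how the rename affects downstream imports, assuming a PaddleClas checkout that includes this change; the exact factory-function signatures and default output size may differ, so treat it as an illustration rather than canonical usage.

# The six model factories are unchanged; only the module that defines them moved
# from model_zoo/gvt.py to model_zoo/twins.py. They are still re-exported from
# ppcls.arch.backbone via the updated __init__.py above.
import paddle
from ppcls.arch.backbone import alt_gvt_small, pcpvt_small

# Direct module imports must use the new path:
#   from ppcls.arch.backbone.model_zoo.twins import alt_gvt_small
# The old path (ppcls.arch.backbone.model_zoo.gvt) no longer exists after this patch.

model = alt_gvt_small()                 # assumed to build with default arguments
x = paddle.randn([1, 3, 224, 224])      # dummy ImageNet-sized input
logits = model(x)
print(logits.shape)                     # expected: [1, 1000] for the ImageNet-1k head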