From 5025d09fd81cf5ed2f78839a99549b81579f7500 Mon Sep 17 00:00:00 2001
From: gaotingquan
Date: Thu, 30 Sep 2021 10:37:32 +0000
Subject: [PATCH] fix: fix a DeprecationWarning & style

---
 .../backbone/model_zoo/vision_transformer.py | 38 +++++--------------
 1 file changed, 10 insertions(+), 28 deletions(-)

diff --git a/ppcls/arch/backbone/model_zoo/vision_transformer.py b/ppcls/arch/backbone/model_zoo/vision_transformer.py
index 8cfa5597..8bcfda02 100644
--- a/ppcls/arch/backbone/model_zoo/vision_transformer.py
+++ b/ppcls/arch/backbone/model_zoo/vision_transformer.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from collections import Callable
+from collections.abc import Callable
 
 import numpy as np
 import paddle
@@ -331,9 +331,7 @@ def _load_pretrained(pretrained, model, model_url, use_ssld=False):
         )
 
 
-def ViT_small_patch16_224(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_small_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=768,
@@ -350,9 +348,7 @@
     return model
 
 
-def ViT_base_patch16_224(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_base_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=768,
@@ -370,9 +366,7 @@
     return model
 
 
-def ViT_base_patch16_384(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_base_patch16_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=16,
@@ -391,9 +385,7 @@
     return model
 
 
-def ViT_base_patch32_384(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_base_patch32_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=32,
@@ -412,9 +404,7 @@
     return model
 
 
-def ViT_large_patch16_224(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_large_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=1024,
@@ -432,9 +422,7 @@
     return model
 
 
-def ViT_large_patch16_384(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_large_patch16_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=16,
@@ -453,9 +441,7 @@
     return model
 
 
-def ViT_large_patch32_384(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_large_patch32_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=32,
@@ -474,9 +460,7 @@
     return model
 
 
-def ViT_huge_patch16_224(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_huge_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=1280,
@@ -492,9 +476,7 @@
     return model
 
 
-def ViT_huge_patch32_384(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_huge_patch32_384(pretrained=False, use_ssld=False, **kwargs):
    model = VisionTransformer(
        img_size=384,
        patch_size=32,
--
GitLab
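
Note (not part of the patch above): a minimal, hypothetical sketch of the DeprecationWarning this commit fixes. Since Python 3.3 the container ABCs live in collections.abc, so importing Callable from collections emits a DeprecationWarning and, on Python 3.10+, raises ImportError outright; the helper apply_fn below is an invented example, not PaddleClas code.

    # Correct import location on Python 3.3+ (the alias in `collections`
    # was deprecated in 3.3 and removed in 3.10).
    from collections.abc import Callable

    def apply_fn(fn, x):
        # Typical use of the imported ABC: a runtime check that an
        # argument is callable before invoking it.
        if isinstance(fn, Callable):
            return fn(x)
        raise TypeError("fn must be callable")

    print(apply_fn(lambda v: v * 2, 21))  # prints 42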