diff --git a/ppcls/arch/backbone/model_zoo/vision_transformer.py b/ppcls/arch/backbone/model_zoo/vision_transformer.py
index 8cfa55975284ba46d5f896b8d434d6e29c9f63d4..8bcfda02e2c44098246a7bfe4516f014d3e1399e 100644
--- a/ppcls/arch/backbone/model_zoo/vision_transformer.py
+++ b/ppcls/arch/backbone/model_zoo/vision_transformer.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from collections import Callable
+from collections.abc import Callable

 import numpy as np
 import paddle
@@ -331,9 +331,7 @@ def _load_pretrained(pretrained, model, model_url, use_ssld=False):
         )


-def ViT_small_patch16_224(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_small_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=768,
@@ -350,9 +348,7 @@ def ViT_small_patch16_224(pretrained=False,
     return model


-def ViT_base_patch16_224(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_base_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=768,
@@ -370,9 +366,7 @@ def ViT_base_patch16_224(pretrained=False,
     return model


-def ViT_base_patch16_384(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_base_patch16_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=16,
@@ -391,9 +385,7 @@ def ViT_base_patch16_384(pretrained=False,
     return model


-def ViT_base_patch32_384(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_base_patch32_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=32,
@@ -412,9 +404,7 @@ def ViT_base_patch32_384(pretrained=False,
     return model


-def ViT_large_patch16_224(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_large_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=1024,
@@ -432,9 +422,7 @@ def ViT_large_patch16_224(pretrained=False,
     return model


-def ViT_large_patch16_384(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_large_patch16_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=16,
@@ -453,9 +441,7 @@ def ViT_large_patch16_384(pretrained=False,
     return model


-def ViT_large_patch32_384(pretrained=False,
-                          use_ssld=False,
-                          **kwargs):
+def ViT_large_patch32_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=32,
@@ -474,9 +460,7 @@ def ViT_large_patch32_384(pretrained=False,
     return model


-def ViT_huge_patch16_224(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_huge_patch16_224(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         patch_size=16,
         embed_dim=1280,
@@ -492,9 +476,7 @@ def ViT_huge_patch16_224(pretrained=False,
     return model


-def ViT_huge_patch32_384(pretrained=False,
-                         use_ssld=False,
-                         **kwargs):
+def ViT_huge_patch32_384(pretrained=False, use_ssld=False, **kwargs):
     model = VisionTransformer(
         img_size=384,
         patch_size=32,
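
The only behavioral change in this diff is the first hunk; the remaining hunks merely collapse multi-line signatures onto one line. A minimal sketch of why the import has to move (not part of the patch; build_if_callable is a hypothetical helper for illustration): the ABC aliases re-exported from collections were deprecated in Python 3.3 and removed in Python 3.10, so `from collections import Callable` raises ImportError on current interpreters, while the collections.abc import works on all supported versions.

# Sketch only; build_if_callable is a hypothetical helper, not PaddleClas code.
# collections.Callable was a deprecated alias of collections.abc.Callable
# (deprecated in Python 3.3, removed in 3.10), so the old import breaks on
# modern interpreters while this one works everywhere.
from collections.abc import Callable


def build_if_callable(factory, **kwargs):
    # isinstance checks work because collections.abc.Callable is an ABC
    # that recognizes any object defining __call__.
    if isinstance(factory, Callable):
        return factory(**kwargs)
    raise TypeError(f"{factory!r} is not callable")


print(build_if_callable(dict, patch_size=16))  # {'patch_size': 16}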