Unverified commit 1a74e9cb, authored by cuicheng01, committed by GitHub

Merge pull request #756 from RainFrost1/develop_reg

legendary models v0.1
from .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152, ResNet18_vd, ResNet34_vd, ResNet50_vd, ResNet101_vd, ResNet152_vd
from .hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W64_C
from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1
from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25
from .inception_v3 import InceptionV3
from .vgg import VGG11, VGG13, VGG16, VGG19
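These exports collect the new legendary-model definitions (resnet, hrnet, mobilenet_v1, mobilenet_v3, inception_v3, vgg) behind a single package. A minimal usage sketch, assuming this listing is the backbone package's __init__.py (the exact import path is an assumption, not shown in the diff):

# hypothetical consumer code
from ppcls.arch.backbone import MobileNetV3_large_x1_0
model = MobileNetV3_large_x1_0(pretrained=True)  # fetches the pdparams listed in MODEL_URLS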
@@ -13,39 +13,37 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import math
import paddle
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"InceptionV3": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/InceptionV3_pretrained.pdparams",
"InceptionV3":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/InceptionV3_pretrained.pdparams"
}
__all__ = list(MODEL_URLS.keys())
'''
InceptionV3 config: dict.
key: the Inception block type used in InceptionV3 ("inception_a" ... "inception_e").
values: the channel configuration for the blocks of that type.
'''
NET_CONFIG = {
"inception_a": [[192, 256, 288], [32, 64, 64]],
"inception_b": [288],
"inception_c": [[768, 768, 768, 768], [128, 160, 160, 192]],
"inception_d": [768],
"inception_e": [1280, 2048]
}
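# For illustration (not in the original source): the "inception_a" entry above
# pairs per-block input channels with pool_features, so it presumably yields
# InceptionA(192, 32), InceptionA(256, 64), InceptionA(288, 64) in the
# constructor loop that this diff collapses; the pairing is an assumption.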
class ConvBNLayer(TheseusLayer):
def __init__(self,
num_channels,
@@ -55,7 +53,7 @@ class ConvBNLayer(TheseusLayer):
padding=0,
groups=1,
act="relu"):
super().__init__()
self.act = act
self.conv = Conv2D(
in_channels=num_channels,
@@ -65,89 +63,97 @@ class ConvBNLayer(TheseusLayer):
padding=padding,
groups=groups,
bias_attr=False)
self.bn = BatchNorm(num_filters)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.act:
x = self.relu(x)
return x
class InceptionStem(TheseusLayer):
def __init__(self):
super().__init__()
self.conv_1a_3x3 = ConvBNLayer(
num_channels=3,
num_filters=32,
filter_size=3,
stride=2,
act="relu")
self.conv_2a_3x3 = ConvBNLayer(
num_channels=32,
num_filters=32,
filter_size=3,
stride=1,
act="relu")
self.conv_2b_3x3 = ConvBNLayer(
num_channels=32,
num_filters=64,
filter_size=3,
padding=1,
act="relu")
self.max_pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
self.conv_3b_1x1 = ConvBNLayer(
num_channels=64, num_filters=80, filter_size=1, act="relu")
self.conv_4a_3x3 = ConvBNLayer(
num_channels=80, num_filters=192, filter_size=3, act="relu")
def forward(self, x):
x = self.conv_1a_3x3(x)
x = self.conv_2a_3x3(x)
x = self.conv_2b_3x3(x)
x = self.max_pool(x)
x = self.conv_3b_1x1(x)
x = self.conv_4a_3x3(x)
x = self.max_pool(x)
return x
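# Illustrative shape trace (not in the original source), assuming the
# canonical 299x299x3 InceptionV3 input:
#   conv_1a_3x3 (stride 2, pad 0): 299 -> 149; conv_2a_3x3: 149 -> 147
#   conv_2b_3x3 (pad 1): 147 -> 147; max_pool (3x3, stride 2): 147 -> 73
#   conv_3b_1x1: 73 -> 73; conv_4a_3x3: 73 -> 71; max_pool: 71 -> 35
# The stem hands a 35x35x192 feature map to the InceptionA blocks.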
class InceptionA(TheseusLayer):
def __init__(self, num_channels, pool_features):
super().__init__()
self.branch1x1 = ConvBNLayer(
num_channels=num_channels,
num_filters=64,
filter_size=1,
act="relu")
self.branch5x5_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=48,
filter_size=1,
act="relu")
self.branch5x5_2 = ConvBNLayer(
num_channels=48,
num_filters=64,
filter_size=5,
padding=2,
act="relu")
self.branch3x3dbl_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=64,
filter_size=1,
act="relu")
self.branch3x3dbl_2 = ConvBNLayer(
num_channels=64,
num_filters=96,
filter_size=3,
padding=1,
act="relu")
self.branch3x3dbl_3 = ConvBNLayer(
num_channels=96,
num_filters=96,
filter_size=3,
padding=1,
act="relu")
self.branch_pool = AvgPool2D(
kernel_size=3, stride=1, padding=1, exclusive=False)
self.branch_pool_conv = ConvBNLayer(
num_channels=num_channels,
num_filters=pool_features,
filter_size=1,
act="relu")
@@ -163,28 +169,33 @@ class InceptionA(TheseusLayer):
branch_pool = self.branch_pool(x)
branch_pool = self.branch_pool_conv(branch_pool)
x = paddle.concat(
[branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1)
return x
class InceptionB(TheseusLayer):
def __init__(self, num_channels):
super().__init__()
self.branch3x3 = ConvBNLayer(
num_channels=num_channels,
num_filters=384,
filter_size=3,
stride=2,
act="relu")
self.branch3x3dbl_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=64,
filter_size=1,
act="relu")
self.branch3x3dbl_2 = ConvBNLayer(
num_channels=64,
num_filters=96,
filter_size=3,
padding=1,
act="relu")
self.branch3x3dbl_3 = ConvBNLayer(
num_channels=96,
num_filters=96,
filter_size=3,
stride=2,
@@ -204,60 +215,71 @@ class InceptionB(TheseusLayer):
return x
class InceptionC(TheseusLayer):
def __init__(self, num_channels, channels_7x7):
super().__init__()
self.branch1x1 = ConvBNLayer(
num_channels=num_channels,
num_filters=192,
filter_size=1,
act="relu")
self.branch7x7_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=channels_7x7,
filter_size=1,
stride=1,
act="relu")
self.branch7x7_2 = ConvBNLayer(
num_channels=channels_7x7,
num_filters=channels_7x7,
filter_size=(1, 7),
stride=1,
padding=(0, 3),
act="relu")
self.branch7x7_3 = ConvBNLayer(
num_channels=channels_7x7,
num_filters=192,
filter_size=(7, 1),
stride=1,
padding=(3, 0),
act="relu")
self.branch7x7dbl_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=channels_7x7,
filter_size=1,
act="relu")
self.branch7x7dbl_2 = ConvBNLayer(
num_channels=channels_7x7,
num_filters=channels_7x7,
filter_size=(7, 1),
padding=(3, 0),
act="relu")
self.branch7x7dbl_3 = ConvBNLayer(
num_channels=channels_7x7,
num_filters=channels_7x7,
filter_size=(1, 7),
padding=(0, 3),
act="relu")
self.branch7x7dbl_4 = ConvBNLayer(
num_channels=channels_7x7,
num_filters=channels_7x7,
filter_size=(7, 1),
padding=(3, 0),
act="relu")
self.branch7x7dbl_5 = ConvBNLayer(
num_channels=channels_7x7,
num_filters=192,
filter_size=(1, 7),
padding=(0, 3),
act="relu")
self.branch_pool = AvgPool2D(
kernel_size=3, stride=1, padding=1, exclusive=False)
self.branch_pool_conv = ConvBNLayer(
num_channels=num_channels,
num_filters=192,
filter_size=1,
act="relu")
@@ -278,37 +300,45 @@ class InceptionC(TheseusLayer):
branch_pool = self.branch_pool(x)
branch_pool = self.branch_pool_conv(branch_pool)
x = paddle.concat(
[branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=1)
return x
class InceptionD(TheseusLayer):
def __init__(self, num_channels):
super().__init__()
self.branch3x3_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=192,
filter_size=1,
act="relu")
self.branch3x3_2 = ConvBNLayer(
num_channels=192,
num_filters=320,
filter_size=3,
stride=2,
act="relu")
self.branch7x7x3_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=192,
filter_size=1,
act="relu")
self.branch7x7x3_2 = ConvBNLayer(
num_channels=192,
num_filters=192,
filter_size=(1, 7),
padding=(0, 3),
act="relu")
self.branch7x7x3_3 = ConvBNLayer(
num_channels=192,
num_filters=192,
filter_size=(7, 1),
padding=(3, 0),
act="relu")
self.branch7x7x3_4 = ConvBNLayer(
num_channels=192,
num_filters=192,
filter_size=3,
stride=2,
@@ -329,52 +359,64 @@ class InceptionD(TheseusLayer):
x = paddle.concat([branch3x3, branch7x7x3, branch_pool], axis=1)
return x
class InceptionE(TheseusLayer):
def __init__(self, num_channels):
super().__init__()
self.branch1x1 = ConvBNLayer(
num_channels=num_channels,
num_filters=320,
filter_size=1,
act="relu")
self.branch3x3_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=384,
filter_size=1,
act="relu")
self.branch3x3_2a = ConvBNLayer(
num_channels=384,
num_filters=384,
filter_size=(1, 3),
padding=(0, 1),
act="relu")
self.branch3x3_2b = ConvBNLayer(
num_channels=384,
num_filters=384,
filter_size=(3, 1),
padding=(1, 0),
act="relu")
self.branch3x3dbl_1 = ConvBNLayer(
num_channels=num_channels,
num_filters=448,
filter_size=1,
act="relu")
self.branch3x3dbl_2 = ConvBNLayer(
num_channels=448,
num_filters=384,
filter_size=3,
padding=1,
act="relu")
self.branch3x3dbl_3a = ConvBNLayer(
num_channels=384,
num_filters=384,
filter_size=(1, 3),
padding=(0, 1),
act="relu")
self.branch3x3dbl_3b = ConvBNLayer(
num_channels=384,
num_filters=384,
filter_size=(3, 1),
padding=(1, 0),
act="relu")
self.branch_pool = AvgPool2D(
kernel_size=3, stride=1, padding=1, exclusive=False)
self.branch_pool_conv = ConvBNLayer(
num_channels=num_channels,
num_filters=192,
filter_size=1,
act="relu")
def forward(self, x):
branch1x1 = self.branch1x1(x)
@@ -396,7 +438,8 @@ class InceptionE(TheseusLayer):
branch_pool = self.branch_pool(x)
branch_pool = self.branch_pool_conv(branch_pool)
x = paddle.concat(
[branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1)
return x
@@ -410,19 +453,15 @@ class Inception_V3(TheseusLayer):
Returns:
model: nn.Layer. Specific Inception_V3 model depends on args.
"""
def __init__(self, config, class_num=1000):
super().__init__()
self.inception_a_list = config["inception_a"]
self.inception_c_list = config["inception_c"]
self.inception_b_list = config["inception_b"]
self.inception_d_list = config["inception_d"]
self.inception_e_list = config["inception_e"]
self.inception_stem = InceptionStem()
@@ -455,8 +494,7 @@ class Inception_V3(TheseusLayer):
self.fc = Linear(
2048,
class_num,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr())
def forward(self, x):
@@ -470,25 +508,29 @@ class Inception_V3(TheseusLayer):
return x
def _load_pretrained(pretrained, model, model_url, use_ssld):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def InceptionV3(pretrained=False, use_ssld=False, **kwargs):
"""
InceptionV3
Args:
kwargs:
class_num: int=1000. Output dim of last fc layer.
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `InceptionV3` model
"""
model = Inception_V3(NET_CONFIG, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["InceptionV3"], use_ssld)
return model
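That completes inception_v3. A short usage sketch (illustrative; `class_num` travels through **kwargs into Inception_V3, and a string `pretrained` loads a local checkpoint):

model = InceptionV3(pretrained=True, class_num=1000)        # fetch weights from MODEL_URLS
model = InceptionV3(pretrained="./InceptionV3_pretrained")  # hypothetical local checkpoint path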
@@ -14,8 +14,6 @@
from __future__ import absolute_import, division, print_function
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear, ReLU, Flatten
@@ -23,14 +21,17 @@ from paddle.nn import AdaptiveAvgPool2D
from paddle.nn.initializer import KaimingNormal
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"MobileNetV1_x0_25": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV1_x0_25_pretrained.pdparams",
"MobileNetV1_x0_5": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV1_x0_5_pretrained.pdparams",
"MobileNetV1_x0_75": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV1_x0_75_pretrained.pdparams",
"MobileNetV1": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV1_pretrained.pdparams",
"MobileNetV1_x0_25":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_25_pretrained.pdparams",
"MobileNetV1_x0_5":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_5_pretrained.pdparams",
"MobileNetV1_x0_75":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_x0_75_pretrained.pdparams",
"MobileNetV1":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV1_pretrained.pdparams"
}
__all__ = list(MODEL_URLS.keys())
@@ -44,7 +45,7 @@ class ConvBNLayer(TheseusLayer):
stride,
padding,
num_groups=1):
super().__init__()
self.conv = Conv2D(
in_channels=num_channels,
@@ -55,9 +56,7 @@ class ConvBNLayer(TheseusLayer):
groups=num_groups,
weight_attr=ParamAttr(initializer=KaimingNormal()),
bias_attr=False)
self.bn = BatchNorm(num_filters)
self.relu = ReLU()
def forward(self, x):
@@ -68,14 +67,9 @@ class ConvBNLayer(TheseusLayer):
class DepthwiseSeparable(TheseusLayer):
def __init__(self, num_channels, num_filters1, num_filters2, num_groups,
stride, scale):
super().__init__()
self.depthwise_conv = ConvBNLayer(
num_channels=num_channels,
@@ -99,10 +93,18 @@ class DepthwiseSeparable(TheseusLayer):
class MobileNet(TheseusLayer):
"""
MobileNet
Args:
scale: float=1.0. The coefficient that controls the size of network parameters.
class_num: int=1000. The number of classes.
Returns:
model: nn.Layer. Specific MobileNet model depends on args.
"""
def __init__(self, scale=1.0, class_num=1000):
super().__init__()
self.scale = scale
self.conv = ConvBNLayer(
num_channels=3,
@@ -133,7 +135,8 @@ class MobileNet(TheseusLayer):
num_filters2=params[2],
num_groups=params[3],
stride=params[4],
scale=scale) for params in self.cfg
])
self.avg_pool = AdaptiveAvgPool2D(1)
self.flatten = Flatten(start_axis=1, stop_axis=-1)
@@ -152,91 +155,77 @@ class MobileNet(TheseusLayer):
return x
def _load_pretrained(pretrained, model, model_url, use_ssld):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def MobileNetV1_x0_25(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1_x0_25
Args:
kwargs:
class_num: int=1000. Output dim of last fc layer.
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args.
"""
model = MobileNet(scale=0.25, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_25"],
use_ssld)
return model
def MobileNetV1_x0_5(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1_x0_5
Args:
kwargs:
class_num: int=1000. Output dim of last fc layer.
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args.
"""
model = MobileNet(scale=0.5, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_5"],
use_ssld)
return model
def MobileNetV1_x0_75(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1_x0_75
Args:
kwargs:
class_num: int=1000. Output dim of last fc layer.
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args.
"""
model = MobileNet(scale=0.75, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1_x0_75"],
use_ssld)
return model
def MobileNetV1(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV1
Args:
kwargs:
class_num: int=1000. Output dim of last fc layer.
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV1` model depends on args.
"""
model = MobileNet(scale=1.0, **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV1"], use_ssld)
return model
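The four MobileNetV1 factories share one body and differ only in the width multiplier passed as `scale` (0.25, 0.5, 0.75, 1.0). An illustrative call:

model = MobileNetV1_x0_75(pretrained=False, class_num=100)  # class_num flows through **kwargs to MobileNet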
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear
from paddle.regularizer import L2Decay
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"MobileNetV3_small_x0_35":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_35_pretrained.pdparams",
"MobileNetV3_small_x0_5":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_5_pretrained.pdparams",
"MobileNetV3_small_x0_75":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x0_75_pretrained.pdparams",
"MobileNetV3_small_x1_0":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_0_pretrained.pdparams",
"MobileNetV3_small_x1_25":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_small_x1_25_pretrained.pdparams",
"MobileNetV3_large_x0_35":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_35_pretrained.pdparams",
"MobileNetV3_large_x0_5":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_5_pretrained.pdparams",
"MobileNetV3_large_x0_75":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x0_75_pretrained.pdparams",
"MobileNetV3_large_x1_0":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_0_pretrained.pdparams",
"MobileNetV3_large_x1_25":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/MobileNetV3_large_x1_25_pretrained.pdparams",
}
__all__ = list(MODEL_URLS.keys())
# "large", "small" is just for MobinetV3_large, MobileNetV3_small respectively.
# The type of "large" or "small" config is a list. Each element(list) represents a depthwise block, which is composed of k, exp, se, act, s.
# k: kernel_size
# exp: middle channel number in depthwise block
# c: output channel number in depthwise block
# se: whether to use SE block
# act: which activation to use
# s: stride in depthwise block
NET_CONFIG = {
"large": [
# k, exp, c, se, act, s
[3, 16, 16, False, "relu", 1],
[3, 64, 24, False, "relu", 2],
[3, 72, 24, False, "relu", 1],
[5, 72, 40, True, "relu", 2],
[5, 120, 40, True, "relu", 1],
[5, 120, 40, True, "relu", 1],
[3, 240, 80, False, "hardswish", 2],
[3, 200, 80, False, "hardswish", 1],
[3, 184, 80, False, "hardswish", 1],
[3, 184, 80, False, "hardswish", 1],
[3, 480, 112, True, "hardswish", 1],
[3, 672, 112, True, "hardswish", 1],
[5, 672, 160, True, "hardswish", 2],
[5, 960, 160, True, "hardswish", 1],
[5, 960, 160, True, "hardswish", 1],
],
"small": [
# k, exp, c, se, act, s
[3, 16, 16, True, "relu", 2],
[3, 72, 24, False, "relu", 2],
[3, 88, 24, False, "relu", 1],
[5, 96, 40, True, "hardswish", 2],
[5, 240, 40, True, "hardswish", 1],
[5, 240, 40, True, "hardswish", 1],
[5, 120, 48, True, "hardswish", 1],
[5, 144, 48, True, "hardswish", 1],
[5, 288, 96, True, "hardswish", 2],
[5, 576, 96, True, "hardswish", 1],
[5, 576, 96, True, "hardswish", 1],
]
}
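# Reading one row, for illustration: the first "large" entry
# [3, 16, 16, False, "relu", 1] is a depthwise block with kernel_size k=3,
# expansion width exp=16, output channels c=16, no SE module, ReLU, stride 1.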
# first conv output channel number in MobileNetV3
STEM_CONV_NUMBER = 16
# last second conv output channel for "small"
LAST_SECOND_CONV_SMALL = 576
# last second conv output channel for "large"
LAST_SECOND_CONV_LARGE = 960
# last conv output channel number for "large" and "small"
LAST_CONV = 1280
def _make_divisible(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
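# Illustrative values for the rounding above (divisor=8; not in the original):
#   _make_divisible(5.6)  -> 8   # e.g. 16 * 0.35, lifted to min_value
#   _make_divisible(27)   -> 32  # nearest multiple 24 < 0.9 * 27, so bumped up
#   _make_divisible(72.0) -> 72  # e.g. 96 * 0.75, already a multiple of 8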
def _create_act(act):
if act == "hardswish":
return nn.Hardswish()
elif act == "relu":
return nn.ReLU()
elif act is None:
return None
else:
raise RuntimeError(
"The activation function is not supported: {}".format(act))
class MobileNetV3(TheseusLayer):
"""
MobileNetV3
Args:
config: list. MobileNetV3 depthwise blocks config.
scale: float=1.0. The coefficient that controls the size of network parameters.
class_num: int=1000. The number of classes.
inplanes: int=16. The output channel number of first convolution layer.
class_squeeze: int=960. The output channel number of penultimate convolution layer.
class_expand: int=1280. The output channel number of last convolution layer.
dropout_prob: float=0.2. Probability of setting units to zero.
Returns:
model: nn.Layer. Specific MobileNetV3 model depends on args.
"""
def __init__(self,
config,
scale=1.0,
class_num=1000,
inplanes=STEM_CONV_NUMBER,
class_squeeze=LAST_SECOND_CONV_LARGE,
class_expand=LAST_CONV,
dropout_prob=0.2):
super().__init__()
self.cfg = config
self.scale = scale
self.inplanes = inplanes
self.class_squeeze = class_squeeze
self.class_expand = class_expand
self.class_num = class_num
self.conv = ConvBNLayer(
in_c=3,
out_c=_make_divisible(self.inplanes * self.scale),
filter_size=3,
stride=2,
padding=1,
num_groups=1,
if_act=True,
act="hardswish")
self.blocks = nn.Sequential(*[
ResidualUnit(
in_c=_make_divisible(self.inplanes * self.scale if i == 0 else
self.cfg[i - 1][2] * self.scale),
mid_c=_make_divisible(self.scale * exp),
out_c=_make_divisible(self.scale * c),
filter_size=k,
stride=s,
use_se=se,
act=act) for i, (k, exp, c, se, act, s) in enumerate(self.cfg)
])
self.last_second_conv = ConvBNLayer(
in_c=_make_divisible(self.cfg[-1][2] * self.scale),
out_c=_make_divisible(self.scale * self.class_squeeze),
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True,
act="hardswish")
self.avg_pool = AdaptiveAvgPool2D(1)
self.last_conv = Conv2D(
in_channels=_make_divisible(self.scale * self.class_squeeze),
out_channels=self.class_expand,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False)
self.hardswish = nn.Hardswish()
self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
self.fc = Linear(self.class_expand, class_num)
def forward(self, x):
x = self.conv(x)
x = self.blocks(x)
x = self.last_second_conv(x)
x = self.avg_pool(x)
x = self.last_conv(x)
x = self.hardswish(x)
x = self.dropout(x)
x = self.flatten(x)
x = self.fc(x)
return x
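# Illustrative shape trace (not in the original source) for
# NET_CONFIG["large"], scale=1.0, input 224x224x3: conv (stride 2) gives
# 112x112x16; the four stride-2 rows in the config halve 112 -> 56 -> 28 ->
# 14 -> 7, ending at 7x7x160; last_second_conv: 7x7x960; avg_pool: 1x1x960;
# last_conv + hardswish: 1x1x1280; flatten + fc: [N, class_num].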
class ConvBNLayer(TheseusLayer):
def __init__(self,
in_c,
out_c,
filter_size,
stride,
padding,
num_groups=1,
if_act=True,
act=None):
super().__init__()
self.conv = Conv2D(
in_channels=in_c,
out_channels=out_c,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
bias_attr=False)
self.bn = BatchNorm(
num_channels=out_c,
act=None,
param_attr=ParamAttr(regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
self.if_act = if_act
self.act = _create_act(act)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.if_act:
x = self.act(x)
return x
class ResidualUnit(TheseusLayer):
def __init__(self,
in_c,
mid_c,
out_c,
filter_size,
stride,
use_se,
act=None):
super().__init__()
self.if_shortcut = stride == 1 and in_c == out_c
self.if_se = use_se
self.expand_conv = ConvBNLayer(
in_c=in_c,
out_c=mid_c,
filter_size=1,
stride=1,
padding=0,
if_act=True,
act=act)
self.bottleneck_conv = ConvBNLayer(
in_c=mid_c,
out_c=mid_c,
filter_size=filter_size,
stride=stride,
padding=int((filter_size - 1) // 2),
num_groups=mid_c,
if_act=True,
act=act)
if self.if_se:
self.mid_se = SEModule(mid_c)
self.linear_conv = ConvBNLayer(
in_c=mid_c,
out_c=out_c,
filter_size=1,
stride=1,
padding=0,
if_act=False,
act=None)
def forward(self, x):
identity = x
x = self.expand_conv(x)
x = self.bottleneck_conv(x)
if self.if_se:
x = self.mid_se(x)
x = self.linear_conv(x)
if self.if_shortcut:
x = paddle.add(identity, x)
return x
# nn.Hardsigmoid does not expose the "slope" and "offset" arguments of nn.functional.hardsigmoid, so a custom layer is used.
class Hardsigmoid(TheseusLayer):
def __init__(self, slope=0.2, offset=0.5):
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, x):
return nn.functional.hardsigmoid(
x, slope=self.slope, offset=self.offset)
class SEModule(TheseusLayer):
def __init__(self, channel, reduction=4):
super().__init__()
self.avg_pool = AdaptiveAvgPool2D(1)
self.conv1 = Conv2D(
in_channels=channel,
out_channels=channel // reduction,
kernel_size=1,
stride=1,
padding=0)
self.relu = nn.ReLU()
self.conv2 = Conv2D(
in_channels=channel // reduction,
out_channels=channel,
kernel_size=1,
stride=1,
padding=0)
self.hardsigmoid = Hardsigmoid(slope=0.2, offset=0.5)
def forward(self, x):
identity = x
x = self.avg_pool(x)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.hardsigmoid(x)
return paddle.multiply(x=identity, y=x)
def _load_pretrained(pretrained, model, model_url, use_ssld):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def MobileNetV3_small_x0_35(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_small_x0_35
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_small_x0_35` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["small"],
scale=0.35,
class_squeeze=LAST_SECOND_CONV_SMALL,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_35"],
use_ssld)
return model
def MobileNetV3_small_x0_5(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_small_x0_5
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["small"],
scale=0.5,
class_squeeze=LAST_SECOND_CONV_SMALL,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_5"],
use_ssld)
return model
def MobileNetV3_small_x0_75(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_small_x0_75
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["small"],
scale=0.75,
class_squeeze=LAST_SECOND_CONV_SMALL,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x0_75"],
use_ssld)
return model
def MobileNetV3_small_x1_0(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_small_x1_0
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["small"],
scale=1.0,
class_squeeze=LAST_SECOND_CONV_SMALL,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x1_0"],
use_ssld)
return model
def MobileNetV3_small_x1_25(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_small_x1_25
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["small"],
scale=1.25,
class_squeeze=LAST_SECOND_CONV_SMALL,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_small_x1_25"],
use_ssld)
return model
def MobileNetV3_large_x0_35(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_large_x0_35
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["large"],
scale=0.35,
class_squeeze=LAST_SECOND_CONV_LARGE,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_35"],
use_ssld)
return model
def MobileNetV3_large_x0_5(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_large_x0_5
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["large"],
scale=0.5,
class_squeeze=LAST_SECOND_CONV_LARGE,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_5"],
use_ssld)
return model
def MobileNetV3_large_x0_75(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_large_x0_75
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["large"],
scale=0.75,
class_squeeze=LAST_SECOND_CONV_LARGE,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x0_75"],
use_ssld)
return model
def MobileNetV3_large_x1_0(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_large_x1_0
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["large"],
scale=1.0,
class_squeeze=LAST_SECOND_CONV_LARGE,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x1_0"],
use_ssld)
return model
def MobileNetV3_large_x1_25(pretrained=False, use_ssld=False, **kwargs):
"""
MobileNetV3_large_x1_25
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `MobileNetV3_large_x1_25` model depends on args.
"""
model = MobileNetV3(
config=NET_CONFIG["large"],
scale=1.25,
class_squeeze=LAST_SECOND_CONV_LARGE,
**kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["MobileNetV3_large_x1_25"],
use_ssld)
return model
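All ten MobileNetV3 factories differ only in the config split ("small"/"large"), `scale`, and `class_squeeze`. Illustrative calls:

model = MobileNetV3_large_x1_0(pretrained=True, use_ssld=True)  # distilled weights when available
tiny = MobileNetV3_small_x0_5(class_num=10)                     # randomly initialized, 10-way head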
@@ -14,16 +14,24 @@
from __future__ import absolute_import, division, print_function
import paddle
from paddle import ParamAttr
import paddle.nn as nn
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import MaxPool2D
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"VGG11":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG11_pretrained.pdparams",
"VGG13":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG13_pretrained.pdparams",
"VGG16":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG16_pretrained.pdparams",
"VGG19":
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/VGG19_pretrained.pdparams",
}
__all__ = list(MODEL_URLS.keys())
# VGG config
# key: VGG network depth
@@ -36,68 +44,12 @@ NET_CONFIG = {
}
class ConvBlock(TheseusLayer):
def __init__(self, input_channels, output_channels, groups):
super().__init__()
self.groups = groups
self.conv1 = Conv2D(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=3,
@@ -105,7 +57,7 @@ class ConvBlock(TheseusLayer):
padding=1,
bias_attr=False)
if groups == 2 or groups == 3 or groups == 4:
self.conv2 = Conv2D(
in_channels=output_channels,
out_channels=output_channels,
kernel_size=3,
@@ -113,7 +65,7 @@ class ConvBlock(TheseusLayer):
padding=1,
bias_attr=False)
if groups == 3 or groups == 4:
self.conv3 = Conv2D(
in_channels=output_channels,
out_channels=output_channels,
kernel_size=3,
@@ -121,7 +73,7 @@ class ConvBlock(TheseusLayer):
padding=1,
bias_attr=False)
if groups == 4:
self.conv4 = Conv2D(
in_channels=output_channels,
out_channels=output_channels,
kernel_size=3,
@@ -129,73 +81,148 @@ class ConvBlock(TheseusLayer):
padding=1,
bias_attr=False)
self.max_pool = MaxPool2D(kernel_size=2, stride=2, padding=0)
self.relu = nn.ReLU()
def forward(self, inputs):
x = self.conv1(inputs)
x = self.relu(x)
if self.groups == 2 or self.groups == 3 or self.groups == 4:
x = self.conv2(x)
x = self.relu(x)
if self.groups == 3 or self.groups == 4:
x = self.conv3(x)
x = self.relu(x)
if self.groups == 4:
x = self.conv4(x)
x = self.relu(x)
x = self.max_pool(x)
return x
class VGGNet(TheseusLayer):
"""
VGGNet
Args:
config: list. VGGNet config.
stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False`
class_num: int=1000. The number of classes.
Returns:
model: nn.Layer. Specific VGG model depends on args.
"""
def __init__(self, config, stop_grad_layers=0, class_num=1000):
super().__init__()
self.stop_grad_layers = stop_grad_layers
self.conv_block_1 = ConvBlock(3, 64, config[0])
self.conv_block_2 = ConvBlock(64, 128, config[1])
self.conv_block_3 = ConvBlock(128, 256, config[2])
self.conv_block_4 = ConvBlock(256, 512, config[3])
self.conv_block_5 = ConvBlock(512, 512, config[4])
self.relu = nn.ReLU()
self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
for idx, block in enumerate([
self.conv_block_1, self.conv_block_2, self.conv_block_3,
self.conv_block_4, self.conv_block_5
]):
if self.stop_grad_layers >= idx + 1:
for param in block.parameters():
param.trainable = False
self.drop = Dropout(p=0.5, mode="downscale_in_infer")
self.fc1 = Linear(7 * 7 * 512, 4096)
self.fc2 = Linear(4096, 4096)
self.fc3 = Linear(4096, class_num)
def forward(self, inputs):
x = self.conv_block_1(inputs)
x = self.conv_block_2(x)
x = self.conv_block_3(x)
x = self.conv_block_4(x)
x = self.conv_block_5(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.drop(x)
x = self.fc2(x)
x = self.relu(x)
x = self.drop(x)
x = self.fc3(x)
return x
def _load_pretrained(pretrained, model, model_url, use_ssld):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def VGG11(pretrained=False, use_ssld=False, **kwargs):
"""
VGG11
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `VGG11` model depends on args.
"""
model = VGGNet(config=NET_CONFIG[11], **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["VGG11"], use_ssld)
return model
def VGG13(pretrained=False, use_ssld=False, **kwargs):
"""
VGG13
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `VGG13` model depends on args.
"""
model = VGGNet(config=NET_CONFIG[13], **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["VGG13"], use_ssld)
return model
def VGG16(pretrained=False, use_ssld=False, **kwargs):
"""
VGG16
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `VGG16` model depends on args.
"""
model = VGGNet(config=NET_CONFIG[16], **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["VGG16"], use_ssld)
return model
def VGG19(pretrained=False, use_ssld=False, **kwargs):
"""
VGG19
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `VGG19` model depends on args.
"""
model = VGGNet(config=NET_CONFIG[19], **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["VGG19"], use_ssld)
return model
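The VGG factories follow the same `_load_pretrained` pattern; `stop_grad_layers` (forwarded through **kwargs to VGGNet) freezes the first N conv blocks, which is convenient for fine-tuning. An illustrative call:

model = VGG16(pretrained=True, stop_grad_layers=2)  # conv_block_1/2 get param.trainable = False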