Commit 5d9d2395 authored by cuicheng01

update resnet&pp-lcnet

Parent c2daa752
pp_lcnet.py
@@ -17,7 +17,7 @@ from __future__ import absolute_import, division, print_function
import paddle
import paddle.nn as nn
from paddle import ParamAttr
-from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear
+from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Dropout, Linear
from paddle.regularizer import L2Decay
from paddle.nn.initializer import KaimingNormal
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
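The import swap above is the root of the BatchNorm changes below: the legacy `BatchNorm` layer takes `param_attr`, while `paddle.nn.BatchNorm2D` takes `weight_attr`, so the ConvBNLayer hunk renames that argument. A minimal sketch of the new call shape (channel count illustrative):

```python
import paddle
from paddle import ParamAttr
from paddle.regularizer import L2Decay

# BatchNorm2D takes weight_attr where the legacy BatchNorm took param_attr;
# L2Decay(0.0) keeps the BN scale/shift out of weight decay.
bn = paddle.nn.BatchNorm2D(
    16,
    weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
    bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
x = paddle.randn([1, 16, 8, 8])
print(bn(x).shape)  # [1, 16, 8, 8]
```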
@@ -83,7 +83,8 @@ class ConvBNLayer(TheseusLayer):
filter_size,
num_filters,
stride,
-num_groups=1):
+num_groups=1,
+lr_mult=1.0):
super().__init__()
self.conv = Conv2D(
@@ -93,13 +94,13 @@ class ConvBNLayer(TheseusLayer):
stride=stride,
padding=(filter_size - 1) // 2,
groups=num_groups,
-weight_attr=ParamAttr(initializer=KaimingNormal()),
+weight_attr=ParamAttr(initializer=KaimingNormal(), learning_rate=lr_mult),
bias_attr=False)
-self.bn = BatchNorm(
+self.bn = BatchNorm2D(
num_filters,
-param_attr=ParamAttr(regularizer=L2Decay(0.0)),
-bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
+weight_attr=ParamAttr(regularizer=L2Decay(0.0), learning_rate=lr_mult),
+bias_attr=ParamAttr(regularizer=L2Decay(0.0), learning_rate=lr_mult))
self.hardswish = nn.Hardswish()
def forward(self, x):
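The new `lr_mult` argument rides on `ParamAttr(learning_rate=...)`: Paddle multiplies the optimizer's global learning rate by the parameter's own multiplier, so a stage can be slowed down or frozen (0.0) without a custom optimizer. A minimal standalone sketch (shapes illustrative):

```python
import paddle
from paddle import ParamAttr

# This conv's weights train at 0.1x the global learning rate;
# learning_rate=0.0 would freeze them outright.
conv = paddle.nn.Conv2D(
    3, 16, 3,
    weight_attr=ParamAttr(learning_rate=0.1),
    bias_attr=False)
# optimize_attr is an internal parameter attribute, read here only
# to show where the multiplier lands.
print(conv.weight.optimize_attr["learning_rate"])  # 0.1
```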
@@ -115,7 +116,8 @@ class DepthwiseSeparable(TheseusLayer):
num_filters,
stride,
dw_size=3,
-use_se=False):
+use_se=False,
+lr_mult=1.0):
super().__init__()
self.use_se = use_se
self.dw_conv = ConvBNLayer(
@@ -123,14 +125,17 @@ class DepthwiseSeparable(TheseusLayer):
num_filters=num_channels,
filter_size=dw_size,
stride=stride,
-num_groups=num_channels)
+num_groups=num_channels,
+lr_mult=lr_mult)
if use_se:
-self.se = SEModule(num_channels)
+self.se = SEModule(num_channels,
+                   lr_mult=lr_mult)
self.pw_conv = ConvBNLayer(
num_channels=num_channels,
filter_size=1,
num_filters=num_filters,
-stride=1)
+stride=1,
+lr_mult=lr_mult)
def forward(self, x):
x = self.dw_conv(x)
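For context, `DepthwiseSeparable` is the usual two-stage factorization: a depthwise conv (`num_groups == num_channels`, one filter per channel) followed by a 1x1 pointwise conv that mixes channels; this hunk only threads `lr_mult` through both halves and the optional SE block. A standalone sketch of the factorization:

```python
import paddle
import paddle.nn as nn

# Depthwise: groups == in_channels. Pointwise: 1x1 channel mixer.
dw = nn.Conv2D(32, 32, 3, padding=1, groups=32, bias_attr=False)
pw = nn.Conv2D(32, 64, 1, bias_attr=False)
x = paddle.randn([1, 32, 56, 56])
print(pw(dw(x)).shape)  # [1, 64, 56, 56]
```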
@@ -141,7 +146,7 @@ class DepthwiseSeparable(TheseusLayer):
class SEModule(TheseusLayer):
-def __init__(self, channel, reduction=4):
+def __init__(self, channel, reduction=4, lr_mult=1.0):
super().__init__()
self.avg_pool = AdaptiveAvgPool2D(1)
self.conv1 = Conv2D(
@@ -149,14 +154,18 @@ class SEModule(TheseusLayer):
out_channels=channel // reduction,
kernel_size=1,
stride=1,
-padding=0)
+padding=0,
+weight_attr=ParamAttr(learning_rate=lr_mult),
+bias_attr=ParamAttr(learning_rate=lr_mult))
self.relu = nn.ReLU()
self.conv2 = Conv2D(
in_channels=channel // reduction,
out_channels=channel,
kernel_size=1,
stride=1,
-padding=0)
+padding=0,
+weight_attr=ParamAttr(learning_rate=lr_mult),
+bias_attr=ParamAttr(learning_rate=lr_mult))
self.hardsigmoid = nn.Hardsigmoid()
def forward(self, x):
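`SEModule` is a standard squeeze-and-excitation block, and the hunk above only adds `lr_mult` to its two 1x1 convs. Functionally: global-average-pool to 1x1, reduce, expand, then gate the input channel-wise. A sketch with illustrative sizes:

```python
import paddle
import paddle.nn as nn

x = paddle.randn([1, 16, 8, 8])
s = nn.AdaptiveAvgPool2D(1)(x)                # squeeze: [1, 16, 1, 1]
s = nn.ReLU()(nn.Conv2D(16, 4, 1)(s))         # reduction=4
s = nn.Hardsigmoid()(nn.Conv2D(4, 16, 1)(s))  # per-channel gates in [0, 1]
print((x * s).shape)                          # excite: [1, 16, 8, 8]
```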
@@ -177,17 +186,44 @@ class PPLCNet(TheseusLayer):
class_num=1000,
dropout_prob=0.2,
class_expand=1280,
+lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
+stride_list=[2, 2, 2, 2, 2],
+use_last_conv=True,
return_patterns=None,
return_stages=None):
super().__init__()
self.scale = scale
self.class_expand = class_expand
+self.lr_mult_list = lr_mult_list
+self.use_last_conv = use_last_conv
+self.stride_list = stride_list
+self.net_config = NET_CONFIG
+if isinstance(self.lr_mult_list, str):
+    self.lr_mult_list = eval(self.lr_mult_list)
+assert isinstance(self.lr_mult_list, (
+    list, tuple
+)), "lr_mult_list should be in (list, tuple) but got {}".format(
+    type(self.lr_mult_list))
+assert len(self.lr_mult_list
+           ) == 6, "lr_mult_list length should be 6 but got {}".format(
+               len(self.lr_mult_list))
+assert isinstance(self.stride_list, (
+    list, tuple
+)), "stride_list should be in (list, tuple) but got {}".format(
+    type(self.stride_list))
+assert len(self.stride_list
+           ) == 5, "stride_list length should be 5 but got {}".format(
+               len(self.stride_list))
+for i, stride in enumerate(stride_list[1:]):
+    self.net_config["blocks{}".format(i+3)][0][3] = stride
self.conv1 = ConvBNLayer(
num_channels=3,
filter_size=3,
num_filters=make_divisible(16 * scale),
-stride=2)
+stride=stride_list[0],
+lr_mult=self.lr_mult_list[0])
self.blocks2 = nn.Sequential(* [
DepthwiseSeparable(
@@ -195,8 +231,9 @@ class PPLCNet(TheseusLayer):
num_filters=make_divisible(out_c * scale),
dw_size=k,
stride=s,
-use_se=se)
-for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks2"])
+use_se=se,
+lr_mult=self.lr_mult_list[1])
+for i, (k, in_c, out_c, s, se) in enumerate(self.net_config["blocks2"])
])
self.blocks3 = nn.Sequential(* [
@@ -205,8 +242,9 @@ class PPLCNet(TheseusLayer):
num_filters=make_divisible(out_c * scale),
dw_size=k,
stride=s,
-use_se=se)
-for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks3"])
+use_se=se,
+lr_mult=self.lr_mult_list[2])
+for i, (k, in_c, out_c, s, se) in enumerate(self.net_config["blocks3"])
])
self.blocks4 = nn.Sequential(* [
@@ -215,8 +253,9 @@ class PPLCNet(TheseusLayer):
num_filters=make_divisible(out_c * scale),
dw_size=k,
stride=s,
-use_se=se)
-for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks4"])
+use_se=se,
+lr_mult=self.lr_mult_list[3])
+for i, (k, in_c, out_c, s, se) in enumerate(self.net_config["blocks4"])
])
self.blocks5 = nn.Sequential(* [
@@ -225,8 +264,9 @@ class PPLCNet(TheseusLayer):
num_filters=make_divisible(out_c * scale),
dw_size=k,
stride=s,
-use_se=se)
-for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks5"])
+use_se=se,
+lr_mult=self.lr_mult_list[4])
+for i, (k, in_c, out_c, s, se) in enumerate(self.net_config["blocks5"])
])
self.blocks6 = nn.Sequential(* [
@@ -235,25 +275,26 @@ class PPLCNet(TheseusLayer):
num_filters=make_divisible(out_c * scale),
dw_size=k,
stride=s,
-use_se=se)
-for i, (k, in_c, out_c, s, se) in enumerate(NET_CONFIG["blocks6"])
+use_se=se,
+lr_mult=self.lr_mult_list[5])
+for i, (k, in_c, out_c, s, se) in enumerate(self.net_config["blocks6"])
])
self.avg_pool = AdaptiveAvgPool2D(1)
+if self.use_last_conv:
self.last_conv = Conv2D(
-in_channels=make_divisible(NET_CONFIG["blocks6"][-1][2] * scale),
+in_channels=make_divisible(self.net_config["blocks6"][-1][2] * scale),
out_channels=self.class_expand,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False)
self.hardswish = nn.Hardswish()
self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
+else:
+    self.last_conv = None
self.flatten = nn.Flatten(start_axis=1, stop_axis=-1)
-self.fc = Linear(self.class_expand, class_num)
+self.fc = Linear(self.class_expand if self.use_last_conv else make_divisible(self.net_config["blocks6"][-1][2]), class_num)
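With `use_last_conv=False` the 1280-wide expansion conv, Hardswish, and dropout are skipped and the classifier reads the backbone's final channel count directly. One caveat worth flagging: the fallback width is `make_divisible(self.net_config["blocks6"][-1][2])` without the `* scale` factor, so for `scale != 1.0` it appears not to match the actual feature width. A standalone sketch of the head wiring (simplified names, illustrative sizes):

```python
import paddle
import paddle.nn as nn

class_expand, blocks6_out, class_num = 1280, 512, 1000
use_last_conv = False

feat = paddle.randn([1, blocks6_out, 1, 1])  # pooled backbone output
if use_last_conv:
    feat = nn.Hardswish()(
        nn.Conv2D(blocks6_out, class_expand, 1, bias_attr=False)(feat))
fc_in = class_expand if use_last_conv else blocks6_out
logits = nn.Linear(fc_in, class_num)(nn.Flatten(1)(feat))
print(logits.shape)  # [1, 1000]
```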
super().init_res(
stages_pattern,
@@ -270,6 +311,7 @@ class PPLCNet(TheseusLayer):
x = self.blocks6(x)
x = self.avg_pool(x)
+if self.last_conv is not None:
x = self.last_conv(x)
x = self.hardswish(x)
x = self.dropout(x)
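Net effect of `stride_list` on PP-LCNet: the stem conv takes `stride_list[0]`, the first blocks of `blocks3`..`blocks6` take the remaining four entries, and `blocks2` stays at stride 1, so total downsampling is the product of the five entries:

```python
stride_list = [2, 2, 2, 2, 2]  # default: 32x total downsampling
total = 1
for s in stride_list:
    total *= s
print(224 // total)  # 7 -> a 224x224 input leaves a 7x7 feature map
```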
resnet.py
@@ -20,9 +20,10 @@ import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
-from paddle.nn import Conv2D, BatchNorm, Linear
+from paddle.nn import Conv2D, BatchNorm, Linear, BatchNorm2D
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
+from paddle.regularizer import L2Decay
import math
from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
@@ -121,17 +122,21 @@ class ConvBNLayer(TheseusLayer):
self.is_vd_mode = is_vd_mode
self.act = act
self.avg_pool = AvgPool2D(
-kernel_size=2, stride=2, padding=0, ceil_mode=True)
+kernel_size=2, stride=stride, padding="SAME", ceil_mode=True)
self.conv = Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
-stride=stride,
+stride=1 if is_vd_mode else stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(learning_rate=lr_mult),
bias_attr=False,
data_format=data_format)
+weight_attr = ParamAttr(learning_rate=lr_mult, trainable=True)
+bias_attr = ParamAttr(learning_rate=lr_mult, trainable=True)
self.bn = BatchNorm(
num_filters,
param_attr=ParamAttr(learning_rate=lr_mult),
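This hunk tightens the ResNet-D (vd) downsampling trick: the 2x2 average pool now honors the block's `stride` (with SAME padding), and in vd mode the conv itself runs at stride 1 instead of discarding three quarters of its input, which is what lets the BottleneckBlock/BasicBlock hunks below pass `stride` to the shortcut unconditionally. A standalone sketch of the vd-mode path:

```python
import paddle
import paddle.nn as nn

# Pool performs the stride-2 reduction; the conv stays at stride 1
# (the is_vd_mode branch of ConvBNLayer).
x = paddle.randn([1, 64, 56, 56])
pool = nn.AvgPool2D(kernel_size=2, stride=2, padding="SAME", ceil_mode=True)
conv = nn.Conv2D(64, 128, 1, stride=1, bias_attr=False)
print(conv(pool(x)).shape)  # [1, 128, 28, 28]
```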
@@ -159,7 +164,6 @@ class BottleneckBlock(TheseusLayer):
lr_mult=1.0,
data_format="NCHW"):
super().__init__()
self.conv0 = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters,
@@ -188,10 +192,11 @@ class BottleneckBlock(TheseusLayer):
num_channels=num_channels,
num_filters=num_filters * 4,
filter_size=1,
-stride=stride if if_first else 1,
+stride=stride,
is_vd_mode=False if if_first else True,
lr_mult=lr_mult,
data_format=data_format)
self.relu = nn.ReLU()
self.shortcut = shortcut
@@ -242,7 +247,7 @@ class BasicBlock(TheseusLayer):
num_channels=num_channels,
num_filters=num_filters,
filter_size=1,
-stride=stride if if_first else 1,
+stride=stride,
is_vd_mode=False if if_first else True,
lr_mult=lr_mult,
data_format=data_format)
@@ -281,14 +286,17 @@ class ResNet(TheseusLayer):
stem_act="relu",
class_num=1000,
lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0],
+stride_list=[2, 2, 2, 2, 2],
data_format="NCHW",
input_image_channel=3,
return_patterns=None,
-return_stages=None):
+return_stages=None,
+**kargs):
super().__init__()
self.cfg = config
self.lr_mult_list = lr_mult_list
+self.stride_list = stride_list
self.is_vd_mode = version == "vd"
self.class_num = class_num
self.num_filters = [64, 128, 256, 512]
@@ -305,14 +313,22 @@ class ResNet(TheseusLayer):
) == 5, "lr_mult_list length should be 5 but got {}".format(
len(self.lr_mult_list))
+assert isinstance(self.stride_list, (
+    list, tuple
+)), "stride_list should be in (list, tuple) but got {}".format(
+    type(self.stride_list))
+assert len(self.stride_list
+           ) == 5, "stride_list length should be 5 but got {}".format(
+               len(self.stride_list))
self.stem_cfg = {
#num_channels, num_filters, filter_size, stride
"vb": [[input_image_channel, 64, 7, 2]],
"vb": [[input_image_channel, 64, 7, self.stride_list[0]]],
"vd":
-[[input_image_channel, 32, 3, 2], [32, 32, 3, 1], [32, 64, 3, 1]]
+[[input_image_channel, 32, 3, self.stride_list[0]], [32, 32, 3, 1], [32, 64, 3, 1]]
}
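The `vd` stem (the ResNet-C variant) replaces `vb`'s single 7x7/stride-2 conv with three 3x3 convs, and both variants now take their first stride from `stride_list[0]`. A bare-conv sketch of the vd stem's shape flow (BN and activation omitted):

```python
import paddle
import paddle.nn as nn

stem = nn.Sequential(
    nn.Conv2D(3, 32, 3, stride=2, padding=1, bias_attr=False),
    nn.Conv2D(32, 32, 3, stride=1, padding=1, bias_attr=False),
    nn.Conv2D(32, 64, 3, stride=1, padding=1, bias_attr=False))
print(stem(paddle.randn([1, 3, 224, 224])).shape)  # [1, 64, 112, 112]
```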
-self.stem = nn.Sequential(*[
+self.stem = nn.Sequential(* [
ConvBNLayer(
num_channels=in_c,
num_filters=out_c,
@@ -325,7 +341,7 @@ class ResNet(TheseusLayer):
])
self.max_pool = MaxPool2D(
-kernel_size=3, stride=2, padding=1, data_format=data_format)
+kernel_size=3, stride=stride_list[1], padding=1, data_format=data_format)
block_list = []
for block_idx in range(len(self.block_depth)):
shortcut = False
@@ -334,7 +350,7 @@ class ResNet(TheseusLayer):
num_channels=self.num_channels[block_idx] if i == 0 else
self.num_filters[block_idx] * self.channels_mult,
num_filters=self.num_filters[block_idx],
-stride=2 if i == 0 and block_idx != 0 else 1,
+stride=self.stride_list[block_idx+1] if i == 0 and block_idx != 0 else 1,
shortcut=shortcut,
if_first=block_idx == i == 0 if version == "vd" else True,
lr_mult=self.lr_mult_list[block_idx + 1],
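Taken together, ResNet's `stride_list` maps as `[stem, max_pool, stage2, stage3, stage4]`; stage 1 (block_idx 0) always runs at stride 1, and `lr_mult_list` follows the same stem-plus-four-stages split. A hedged usage sketch; the import path and the `ResNet50_vd` entry point are assumed from the PaddleClas repo layout rather than shown in this diff:

```python
# Assumed: entry points such as ResNet50_vd live in this file and
# forward extra kwargs into ResNet.__init__.
from ppcls.arch.backbone.legendary_models.resnet import ResNet50_vd

model = ResNet50_vd(
    lr_mult_list=[0.1, 0.1, 0.2, 0.2, 0.3],  # damp updates in early stages
    stride_list=[2, 2, 2, 2, 1])             # keep stage 4 at 14x14 for a 224 input
```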