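# GoogLeNet (Inception v1) for PaddleClas, implemented with Paddle's
# dynamic-graph (paddle.nn.Layer) API. The two auxiliary classifiers from the
# original paper are kept, so the forward pass returns three sets of logits.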
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform

import math

from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url

MODEL_URLS = {
    "GoogLeNet":
    "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams"
}

__all__ = list(MODEL_URLS.keys())


def xavier(channels, filter_size, name):
    # Xavier/Glorot-style uniform initialization: the bound is derived from the
    # fan-in (filter_size**2 * channels) of the layer being initialized.
    stdv = (3.0 / (filter_size**2 * channels))**0.5
    param_attr = ParamAttr(
        initializer=Uniform(-stdv, stdv), name=name + "_weights")
    return param_attr


# Plain Conv2D wrapper: "same" padding and no bias; the `act` argument is
# accepted but not used in forward().
class ConvLayer(nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)

    def forward(self, inputs):
        y = self._conv(inputs)
        return y


class Inception(nn.Layer):
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter1,
                 filter3R,
                 filter3,
                 filter5R,
                 filter5,
                 proj,
                 name=None):
        super(Inception, self).__init__()

        self._conv1 = ConvLayer(
            input_channels, filter1, 1, name="inception_" + name + "_1x1")
        self._conv3r = ConvLayer(
            input_channels,
            filter3R,
            1,
            name="inception_" + name + "_3x3_reduce")
        self._conv3 = ConvLayer(
            filter3R, filter3, 3, name="inception_" + name + "_3x3")
        self._conv5r = ConvLayer(
            input_channels,
            filter5R,
            1,
            name="inception_" + name + "_5x5_reduce")
        self._conv5 = ConvLayer(
            filter5R, filter5, 5, name="inception_" + name + "_5x5")
        self._pool = MaxPool2D(kernel_size=3, stride=1, padding=1)

        self._convprj = ConvLayer(
            input_channels, proj, 1, name="inception_" + name + "_3x3_proj")

    def forward(self, inputs):
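        # Four parallel branches: 1x1 conv, 1x1->3x3, 1x1->5x5, and a 3x3
        # max-pool followed by a 1x1 projection; outputs are concatenated
        # along the channel axis.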
        conv1 = self._conv1(inputs)

        conv3r = self._conv3r(inputs)
        conv3 = self._conv3(conv3r)

        conv5r = self._conv5r(inputs)
        conv5 = self._conv5(conv5r)

        pool = self._pool(inputs)
        convprj = self._convprj(pool)

        cat = paddle.concat([conv1, conv3, conv5, convprj], axis=1)
        cat = F.relu(cat)
        return cat


class GoogLeNetDY(nn.Layer):
    def __init__(self, class_dim=1000):
        super(GoogLeNetDY, self).__init__()
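        # Stem: 7x7 stride-2 conv, max-pool, then the 1x1 and 3x3 convs of conv2.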
        self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
        self._pool = MaxPool2D(kernel_size=3, stride=2)
        self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1")
        self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3")

        self._ince3a = Inception(
            192, 192, 64, 96, 128, 16, 32, 32, name="ince3a")
        self._ince3b = Inception(
            256, 256, 128, 128, 192, 32, 96, 64, name="ince3b")

        self._ince4a = Inception(
            480, 480, 192, 96, 208, 16, 48, 64, name="ince4a")
        self._ince4b = Inception(
            512, 512, 160, 112, 224, 24, 64, 64, name="ince4b")
        self._ince4c = Inception(
            512, 512, 128, 128, 256, 24, 64, 64, name="ince4c")
        self._ince4d = Inception(
            512, 512, 112, 144, 288, 32, 64, 64, name="ince4d")
        self._ince4e = Inception(
            528, 528, 256, 160, 320, 32, 128, 128, name="ince4e")

        self._ince5a = Inception(
            832, 832, 256, 160, 320, 32, 128, 128, name="ince5a")
        self._ince5b = Inception(
            832, 832, 384, 192, 384, 48, 128, 128, name="ince5b")

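        # Main classification head: 7x7 average pooling, dropout, then FC.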
        self._pool_5 = AvgPool2D(kernel_size=7, stride=7)

        self._drop = Dropout(p=0.4, mode="downscale_in_infer")
        self._fc_out = Linear(
            1024,
            class_dim,
            weight_attr=xavier(1024, 1, "out"),
            bias_attr=ParamAttr(name="out_offset"))
        self._pool_o1 = AvgPool2D(kernel_size=5, stride=3)
        self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
        self._fc_o1 = Linear(
            1152,
            1024,
            weight_attr=xavier(2048, 1, "fc_o1"),
            bias_attr=ParamAttr(name="fc_o1_offset"))
        self._drop_o1 = Dropout(p=0.7, mode="downscale_in_infer")
        self._out1 = Linear(
            1024,
            class_dim,
            weight_attr=xavier(1024, 1, "out1"),
            bias_attr=ParamAttr(name="out1_offset"))
        self._pool_o2 = AvgPool2D(kernel_size=5, stride=3)
        self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
        self._fc_o2 = Linear(
            1152,
            1024,
            weight_attr=xavier(2048, 1, "fc_o2"),
            bias_attr=ParamAttr(name="fc_o2_offset"))
        self._drop_o2 = Dropout(p=0.7, mode="downscale_in_infer")
        self._out2 = Linear(
            1024,
            class_dim,
            weight_attr=xavier(1024, 1, "out2"),
            bias_attr=ParamAttr(name="out2_offset"))

    def forward(self, inputs):
        x = self._conv(inputs)
        x = self._pool(x)
        x = self._conv_1(x)
        x = self._conv_2(x)
        x = self._pool(x)

        x = self._ince3a(x)
        x = self._ince3b(x)
        x = self._pool(x)

        ince4a = self._ince4a(x)
        x = self._ince4b(ince4a)
        x = self._ince4c(x)
        ince4d = self._ince4d(x)
        x = self._ince4e(ince4d)
        x = self._pool(x)

        x = self._ince5a(x)
        ince5b = self._ince5b(x)

        x = self._pool_5(ince5b)
        x = self._drop(x)
        x = paddle.squeeze(x, axis=[2, 3])
        out = self._fc_out(x)

        x = self._pool_o1(ince4a)
        x = self._conv_o1(x)
        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
        x = self._fc_o1(x)
        x = F.relu(x)
        x = self._drop_o1(x)
        out1 = self._out1(x)

        x = self._pool_o2(ince4d)
        x = self._conv_o2(x)
        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
        x = self._fc_o2(x)
        x = self._drop_o2(x)
        out2 = self._out2(x)
        return [out, out1, out2]


def _load_pretrained(pretrained, model, model_url, use_ssld=False):
    if pretrained is False:
        pass
    elif pretrained is True:
        load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
    elif isinstance(pretrained, str):
        load_dygraph_pretrain(model, pretrained)
    else:
        raise RuntimeError(
            "pretrained type is not available. Please use `string` or `boolean` type."
        )


def GoogLeNet(pretrained=False, use_ssld=False, **kwargs):
    model = GoogLeNetDY(**kwargs)
    _load_pretrained(
        pretrained, model, MODEL_URLS["GoogLeNet"], use_ssld=use_ssld)
    return model
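

# Illustrative usage sketch (not part of the upstream file): build the network
# without pretrained weights and run a forward pass on a random 224x224 batch.
# The three returned tensors are the main logits and the two auxiliary logits.
if __name__ == "__main__":
    net = GoogLeNet(pretrained=False)
    img = paddle.rand([1, 3, 224, 224])
    logits, aux1, aux2 = net(img)
    print(logits.shape, aux1.shape, aux2.shape)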