未验证 提交 fe302aec 编写于 作者: L littletomatodonkey 提交者: GitHub

Merge pull request #183 from wqz960/PaddleClas-dy

add Inception, ResNeXt101_wsl, EfficientNet and other models
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
__all__ = ['AlexNet'] __all__ = ["AlexNet"]
class AlexNet():
def __init__(self):
pass
def net(self, input, class_dim=1000):
stdv = 1.0 / math.sqrt(input.shape[1] * 11 * 11)
layer_name = [
"conv1", "conv2", "conv3", "conv4", "conv5", "fc6", "fc7", "fc8"
]
conv1 = fluid.layers.conv2d(
input=input,
num_filters=64,
filter_size=11,
stride=4,
padding=2,
groups=1,
act='relu',
bias_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[0] + "_offset"),
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[0] + "_weights"))
pool1 = fluid.layers.pool2d(
input=conv1,
pool_size=3,
pool_stride=2,
pool_padding=0,
pool_type='max')
stdv = 1.0 / math.sqrt(pool1.shape[1] * 5 * 5)
conv2 = fluid.layers.conv2d(
input=pool1,
num_filters=192,
filter_size=5,
stride=1,
padding=2,
groups=1,
act='relu',
bias_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[1] + "_offset"),
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[1] + "_weights"))
pool2 = fluid.layers.pool2d(
input=conv2,
pool_size=3,
pool_stride=2,
pool_padding=0,
pool_type='max')
stdv = 1.0 / math.sqrt(pool2.shape[1] * 3 * 3)
conv3 = fluid.layers.conv2d(
input=pool2,
num_filters=384,
filter_size=3,
stride=1,
padding=1,
groups=1,
act='relu',
bias_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[2] + "_offset"),
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[2] + "_weights"))
stdv = 1.0 / math.sqrt(conv3.shape[1] * 3 * 3)
conv4 = fluid.layers.conv2d(
input=conv3,
num_filters=256,
filter_size=3,
stride=1,
padding=1,
groups=1,
act='relu',
bias_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[3] + "_offset"),
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[3] + "_weights"))
stdv = 1.0 / math.sqrt(conv4.shape[1] * 3 * 3) class ConvPoolLayer(fluid.dygraph.Layer):
conv5 = fluid.layers.conv2d( def __init__(self,
input=conv4, inputc_channels,
num_filters=256, output_channels,
filter_size=3, filter_size,
stride=1, stride,
padding=1, padding,
stdv,
groups=1, groups=1,
act='relu', act=None,
bias_attr=fluid.param_attr.ParamAttr( name=None):
initializer=fluid.initializer.Uniform(-stdv, stdv), super(ConvPoolLayer, self).__init__()
name=layer_name[4] + "_offset"),
param_attr=fluid.param_attr.ParamAttr( self._conv = Conv2D(num_channels=inputc_channels,
initializer=fluid.initializer.Uniform(-stdv, stdv), num_filters=output_channels,
name=layer_name[4] + "_weights")) filter_size=filter_size,
pool5 = fluid.layers.pool2d( stride=stride,
input=conv5, padding=padding,
pool_size=3, groups=groups,
param_attr=ParamAttr(name=name + "_weights",
initializer=fluid.initializer.Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name=name + "_offset",
initializer=fluid.initializer.Uniform(-stdv, stdv)),
act=act)
self._pool = Pool2D(pool_size=3,
pool_stride=2, pool_stride=2,
pool_padding=0, pool_padding=0,
pool_type='max') pool_type="max")
drop6 = fluid.layers.dropout(x=pool5, dropout_prob=0.5) def forward(self, inputs):
stdv = 1.0 / math.sqrt(drop6.shape[1] * drop6.shape[2] * x = self._conv(inputs)
drop6.shape[3] * 1.0) x = self._pool(x)
return x
fc6 = fluid.layers.fc(
input=drop6,
size=4096, class AlexNetDY(fluid.dygraph.Layer):
act='relu', def __init__(self, class_dim=1000):
bias_attr=fluid.param_attr.ParamAttr( super(AlexNetDY, self).__init__()
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[5] + "_offset"), stdv = 1.0/math.sqrt(3*11*11)
param_attr=fluid.param_attr.ParamAttr( self._conv1 = ConvPoolLayer(
initializer=fluid.initializer.Uniform(-stdv, stdv), 3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
name=layer_name[5] + "_weights")) stdv = 1.0/math.sqrt(64*5*5)
self._conv2 = ConvPoolLayer(
drop7 = fluid.layers.dropout(x=fc6, dropout_prob=0.5) 64, 192, 5, 1, 2, stdv, act="relu", name="conv2")
stdv = 1.0 / math.sqrt(drop7.shape[1] * 1.0) stdv = 1.0/math.sqrt(192*3*3)
self._conv3 = Conv2D(192, 384, 3, stride=1, padding=1,
fc7 = fluid.layers.fc( param_attr=ParamAttr(name="conv3_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
input=drop7, bias_attr=ParamAttr(name="conv3_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
size=4096, act="relu")
act='relu', stdv = 1.0/math.sqrt(384*3*3)
bias_attr=fluid.param_attr.ParamAttr( self._conv4 = Conv2D(384, 256, 3, stride=1, padding=1,
initializer=fluid.initializer.Uniform(-stdv, stdv), param_attr=ParamAttr(name="conv4_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
name=layer_name[6] + "_offset"), bias_attr=ParamAttr(name="conv4_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
param_attr=fluid.param_attr.ParamAttr( act="relu")
initializer=fluid.initializer.Uniform(-stdv, stdv), stdv = 1.0/math.sqrt(256*3*3)
name=layer_name[6] + "_weights")) self._conv5 = ConvPoolLayer(
256, 256, 3, 1, 1, stdv, act="relu", name="conv5")
stdv = 1.0 / math.sqrt(fc7.shape[1] * 1.0) stdv = 1.0/math.sqrt(256*6*6)
out = fluid.layers.fc(
input=fc7, self._drop1 = Dropout(p=0.5)
size=class_dim, self._fc6 = Linear(input_dim=256*6*6,
bias_attr=fluid.param_attr.ParamAttr( output_dim=4096,
initializer=fluid.initializer.Uniform(-stdv, stdv), param_attr=ParamAttr(name="fc6_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
name=layer_name[7] + "_offset"), bias_attr=ParamAttr(name="fc6_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
param_attr=fluid.param_attr.ParamAttr( act="relu")
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=layer_name[7] + "_weights")) self._drop2 = Dropout(p=0.5)
return out self._fc7 = Linear(input_dim=4096,
output_dim=4096,
param_attr=ParamAttr(name="fc7_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name="fc7_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
act="relu")
self._fc8 = Linear(input_dim=4096,
output_dim=class_dim,
param_attr=ParamAttr(name="fc8_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name="fc8_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)))
def forward(self, inputs):
    """Run AlexNet: five conv stages, flatten, then fc6/fc7/fc8.

    Args:
        inputs: input image batch; presumably NCHW (N, 3, 224, 224) so the
            final conv stage yields (N, 256, 6, 6) — TODO confirm against caller.

    Returns:
        Logits tensor of shape (N, class_dim) from the final fc8 layer.
    """
    x = self._conv1(inputs)
    x = self._conv2(x)
    x = self._conv3(x)
    x = self._conv4(x)
    x = self._conv5(x)
    # BUG FIX: the original used flatten(..., axis=0), which folds the batch
    # dimension into the features (shape [1, N*256*6*6]) and breaks fc6
    # (input_dim=256*6*6) for any batch size > 1. axis=1 keeps the batch
    # dimension and flattens only C*H*W, giving (N, 256*6*6).
    x = fluid.layers.flatten(x, axis=1)
    x = self._drop1(x)
    x = self._fc6(x)
    x = self._drop2(x)
    x = self._fc7(x)
    x = self._fc8(x)
    return x
def AlexNet(**args):
    """Build the dygraph AlexNet model; keyword args (e.g. class_dim) are forwarded."""
    return AlexNetDY(**args)
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. import paddle
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
import math import math
__all__ = ["DarkNet53"] __all__ = ["DarkNet53"]
class DarkNet53(): class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self): def __init__(self,
input_channels,
pass output_channels,
def net(self, input, class_dim=1000):
DarkNet_cfg = {53: ([1, 2, 8, 8, 4], self.basicblock)}
stages, block_func = DarkNet_cfg[53]
stages = stages[0:5]
conv1 = self.conv_bn_layer(
input,
ch_out=32,
filter_size=3,
stride=1,
padding=1,
name="yolo_input")
conv = self.downsample(
conv1, ch_out=conv1.shape[1] * 2, name="yolo_input.downsample")
for i, stage in enumerate(stages):
conv = self.layer_warp(
block_func,
conv,
32 * (2**i),
stage,
name="stage.{}".format(i))
if i < len(stages) - 1: # do not downsaple in the last stage
conv = self.downsample(
conv,
ch_out=conv.shape[1] * 2,
name="stage.{}.downsample".format(i))
pool = fluid.layers.pool2d(
input=conv, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(
input=pool,
size=class_dim,
param_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name='fc_weights'),
bias_attr=ParamAttr(name='fc_offset'))
return out
def conv_bn_layer(self,
input,
ch_out,
filter_size, filter_size,
stride, stride,
padding, padding,
name=None): name=None):
conv = fluid.layers.conv2d( super(ConvBNLayer, self).__init__()
input=input,
num_filters=ch_out, self._conv = Conv2D(
num_channels=input_channels,
num_filters=output_channels,
filter_size=filter_size, filter_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
...@@ -82,39 +28,133 @@ class DarkNet53(): ...@@ -82,39 +28,133 @@ class DarkNet53():
bias_attr=False) bias_attr=False)
bn_name = name + ".bn" bn_name = name + ".bn"
out = fluid.layers.batch_norm( self._bn = BatchNorm(
input=conv, num_channels=output_channels,
act='relu', act="relu",
param_attr=ParamAttr(name=bn_name + '.scale'), param_attr=ParamAttr(name=bn_name + ".scale"),
bias_attr=ParamAttr(name=bn_name + '.offset'), bias_attr=ParamAttr(name=bn_name + ".offset"),
moving_mean_name=bn_name + '.mean', moving_mean_name=bn_name + ".mean",
moving_variance_name=bn_name + '.var') moving_variance_name=bn_name + ".var")
return out
def forward(self, inputs):
def downsample(self, x = self._conv(inputs)
input, x = self._bn(x)
ch_out, return x
filter_size=3,
stride=2,
padding=1, class BasicBlock(fluid.dygraph.Layer):
name=None): def __init__(self, input_channels, output_channels, name=None):
return self.conv_bn_layer( super(BasicBlock, self).__init__()
input,
ch_out=ch_out, self._conv1 = ConvBNLayer(
filter_size=filter_size, input_channels, output_channels, 1, 1, 0, name=name + ".0")
stride=stride, self._conv2 = ConvBNLayer(
padding=padding, output_channels, output_channels * 2, 3, 1, 1, name=name + ".1")
name=name)
def forward(self, inputs):
def basicblock(self, input, ch_out, name=None): x = self._conv1(inputs)
conv1 = self.conv_bn_layer(input, ch_out, 1, 1, 0, name=name + ".0") x = self._conv2(x)
conv2 = self.conv_bn_layer( return fluid.layers.elementwise_add(x=inputs, y=x)
conv1, ch_out * 2, 3, 1, 1, name=name + ".1")
out = fluid.layers.elementwise_add(x=input, y=conv2, act=None)
return out class DarkNet(fluid.dygraph.Layer):
def __init__(self, class_dim=1000):
    """DarkNet-53 backbone plus classification head (dygraph).

    Args:
        class_dim: number of classes for the final Linear layer (default 1000).
    """
    super(DarkNet, self).__init__()

    # Residual-block counts per stage — the classic DarkNet-53 layout.
    self.stages = [1, 2, 8, 8, 4]

    # Stem: 3x3 conv then a stride-2 downsampling conv.
    self._conv1 = ConvBNLayer(3, 32, 3, 1, 1, name="yolo_input")
    self._conv2 = ConvBNLayer(32, 64, 3, 2, 1, name="yolo_input.downsample")

    # Stage 0: one block, then downsample 64 -> 128.
    self._basic_block_01 = BasicBlock(64, 32, name="stage.0.0")
    self._downsample_0 = ConvBNLayer(64, 128, 3, 2, 1, name="stage.0.downsample")

    # Stage 1: two blocks, then downsample 128 -> 256.
    for i in range(2):
        setattr(self, "_basic_block_1%d" % (i + 1),
                BasicBlock(128, 64, name="stage.1.%d" % i))
    self._downsample_1 = ConvBNLayer(128, 256, 3, 2, 1, name="stage.1.downsample")

    # Stage 2: eight blocks, then downsample 256 -> 512.
    for i in range(8):
        setattr(self, "_basic_block_2%d" % (i + 1),
                BasicBlock(256, 128, name="stage.2.%d" % i))
    self._downsample_2 = ConvBNLayer(256, 512, 3, 2, 1, name="stage.2.downsample")

    # Stage 3: eight blocks, then downsample 512 -> 1024.
    for i in range(8):
        setattr(self, "_basic_block_3%d" % (i + 1),
                BasicBlock(512, 256, name="stage.3.%d" % i))
    self._downsample_3 = ConvBNLayer(512, 1024, 3, 2, 1, name="stage.3.downsample")

    # Stage 4: four blocks (no trailing downsample).
    for i in range(4):
        setattr(self, "_basic_block_4%d" % (i + 1),
                BasicBlock(1024, 512, name="stage.4.%d" % i))

    # Global average pool + Linear head; weights use the usual
    # uniform(-1/sqrt(fan_in), 1/sqrt(fan_in)) init with fan_in = 1024.
    self._pool = Pool2D(pool_type="avg", global_pooling=True)
    stdv = 1.0 / math.sqrt(1024.0)
    self._out = Linear(
        input_dim=1024,
        output_dim=class_dim,
        param_attr=ParamAttr(
            name="fc_weights",
            initializer=fluid.initializer.Uniform(-stdv, stdv)),
        bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs):
    """Forward pass: stem convs, five residual stages with downsampling,
    global average pool, squeeze spatial dims, then the Linear head."""
    x = self._conv2(self._conv1(inputs))

    x = self._basic_block_01(x)
    x = self._downsample_0(x)

    for i in range(1, 3):
        x = getattr(self, "_basic_block_1%d" % i)(x)
    x = self._downsample_1(x)

    for i in range(1, 9):
        x = getattr(self, "_basic_block_2%d" % i)(x)
    x = self._downsample_2(x)

    for i in range(1, 9):
        x = getattr(self, "_basic_block_3%d" % i)(x)
    x = self._downsample_3(x)

    for i in range(1, 5):
        x = getattr(self, "_basic_block_4%d" % i)(x)

    x = self._pool(x)
    # Global pooling leaves (N, 1024, 1, 1); drop the 1x1 spatial dims
    # so the Linear head sees (N, 1024).
    x = fluid.layers.squeeze(x, axes=[2, 3])
    return self._out(x)
def DarkNet53(**args):
    """Build the DarkNet-53 classifier; keyword args (e.g. class_dim) are forwarded."""
    return DarkNet(**args)
\ No newline at end of file
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
import math
__all__ = ['GoogLeNet'] __all__ = ['GoogLeNet_DY']
def xavier(channels, filter_size, name):
stdv = (3.0 / (filter_size**2 * channels))**0.5
param_attr = ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + "_weights")
class GoogLeNet(): return param_attr
def __init__(self):
pass
def conv_layer(self, class ConvLayer(fluid.dygraph.Layer):
input, def __init__(self,
num_channels,
num_filters, num_filters,
filter_size, filter_size,
stride=1, stride=1,
groups=1, groups=1,
act=None, act=None,
name=None): name=None):
channels = input.shape[1] super(ConvLayer, self).__init__()
stdv = (3.0 / (filter_size**2 * channels))**0.5
param_attr = ParamAttr( self._conv = Conv2D(
initializer=fluid.initializer.Uniform(-stdv, stdv), num_channels=num_channels,
name=name + "_weights")
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters, num_filters=num_filters,
filter_size=filter_size, filter_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=act, act=None,
param_attr=param_attr, param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False, bias_attr=False)
name=name)
return conv
def xavier(self, channels, filter_size, name): def forward(self, inputs):
stdv = (3.0 / (filter_size**2 * channels))**0.5 y = self._conv(inputs)
param_attr = ParamAttr( return y
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + "_weights")
return param_attr
def inception(self, class Inception(fluid.dygraph.Layer):
input, def __init__(self,
channels, input_channels,
output_channels,
filter1, filter1,
filter3R, filter3R,
filter3, filter3,
...@@ -72,166 +54,155 @@ class GoogLeNet(): ...@@ -72,166 +54,155 @@ class GoogLeNet():
filter5, filter5,
proj, proj,
name=None): name=None):
conv1 = self.conv_layer( super(Inception, self).__init__()
input=input,
num_filters=filter1, self._conv1 = ConvLayer(
filter_size=1, input_channels, filter1, 1, name="inception_" + name + "_1x1")
stride=1, self._conv3r = ConvLayer(
act=None, input_channels,
name="inception_" + name + "_1x1") filter3R,
conv3r = self.conv_layer( 1,
input=input,
num_filters=filter3R,
filter_size=1,
stride=1,
act=None,
name="inception_" + name + "_3x3_reduce") name="inception_" + name + "_3x3_reduce")
conv3 = self.conv_layer( self._conv3 = ConvLayer(
input=conv3r, filter3R, filter3, 3, name="inception_" + name + "_3x3")
num_filters=filter3, self._conv5r = ConvLayer(
filter_size=3, input_channels,
stride=1, filter5R,
act=None, 1,
name="inception_" + name + "_3x3")
conv5r = self.conv_layer(
input=input,
num_filters=filter5R,
filter_size=1,
stride=1,
act=None,
name="inception_" + name + "_5x5_reduce") name="inception_" + name + "_5x5_reduce")
conv5 = self.conv_layer( self._conv5 = ConvLayer(
input=conv5r, filter5R, filter5, 5, name="inception_" + name + "_5x5")
num_filters=filter5, self._pool = Pool2D(
filter_size=5, pool_size=3, pool_type="max", pool_stride=1, pool_padding=1)
stride=1, self._convprj = ConvLayer(
act=None, input_channels, proj, 1, name="inception_" + name + "_3x3_proj")
name="inception_" + name + "_5x5")
pool = fluid.layers.pool2d( def forward(self, inputs):
input=input, conv1 = self._conv1(inputs)
pool_size=3,
pool_stride=1, conv3r = self._conv3r(inputs)
pool_padding=1, conv3 = self._conv3(conv3r)
pool_type='max')
convprj = fluid.layers.conv2d( conv5r = self._conv5r(inputs)
input=pool, conv5 = self._conv5(conv5r)
filter_size=1,
num_filters=proj, pool = self._pool(inputs)
stride=1, convprj = self._convprj(pool)
padding=0,
name="inception_" + name + "_3x3_proj", cat = fluid.layers.concat([conv1, conv3, conv5, convprj], axis=1)
param_attr=ParamAttr( layer_helper = LayerHelper(self.full_name(), act="relu")
name="inception_" + name + "_3x3_proj_weights"), return layer_helper.append_activation(cat)
bias_attr=False)
cat = fluid.layers.concat(input=[conv1, conv3, conv5, convprj], axis=1)
cat = fluid.layers.relu(cat) class GoogleNetDY(fluid.dygraph.Layer):
return cat def __init__(self, class_dim=1000):
super(GoogleNetDY, self).__init__()
def net(self, input, class_dim=1000): self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
conv = self.conv_layer( self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2)
input=input, self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1")
num_filters=64, self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3")
filter_size=7,
stride=2, self._ince3a = Inception(
act=None, 192, 192, 64, 96, 128, 16, 32, 32, name="ince3a")
name="conv1") self._ince3b = Inception(
pool = fluid.layers.pool2d( 256, 256, 128, 128, 192, 32, 96, 64, name="ince3b")
input=conv, pool_size=3, pool_type='max', pool_stride=2)
self._ince4a = Inception(
conv = self.conv_layer( 480, 480, 192, 96, 208, 16, 48, 64, name="ince4a")
input=pool, self._ince4b = Inception(
num_filters=64, 512, 512, 160, 112, 224, 24, 64, 64, name="ince4b")
filter_size=1, self._ince4c = Inception(
stride=1, 512, 512, 128, 128, 256, 24, 64, 64, name="ince4c")
act=None, self._ince4d = Inception(
name="conv2_1x1") 512, 512, 112, 144, 288, 32, 64, 64, name="ince4d")
conv = self.conv_layer( self._ince4e = Inception(
input=conv, 528, 528, 256, 160, 320, 32, 128, 128, name="ince4e")
num_filters=192,
filter_size=3, self._ince5a = Inception(
stride=1, 832, 832, 256, 160, 320, 32, 128, 128, name="ince5a")
act=None, self._ince5b = Inception(
name="conv2_3x3") 832, 832, 384, 192, 384, 48, 128, 128, name="ince5b")
pool = fluid.layers.pool2d(
input=conv, pool_size=3, pool_type='max', pool_stride=2) self._pool_5 = Pool2D(pool_size=7, pool_type='avg', pool_stride=7)
ince3a = self.inception(pool, 192, 64, 96, 128, 16, 32, 32, "ince3a") self._drop = fluid.dygraph.Dropout(p=0.4)
ince3b = self.inception(ince3a, 256, 128, 128, 192, 32, 96, 64, self._fc_out = Linear(
"ince3b") 1024,
pool3 = fluid.layers.pool2d( class_dim,
input=ince3b, pool_size=3, pool_type='max', pool_stride=2) param_attr=xavier(1024, 1, "out"),
bias_attr=ParamAttr(name="out_offset"),
ince4a = self.inception(pool3, 480, 192, 96, 208, 16, 48, 64, "ince4a") act="softmax")
ince4b = self.inception(ince4a, 512, 160, 112, 224, 24, 64, 64, self._pool_o1 = Pool2D(pool_size=5, pool_stride=3, pool_type="avg")
"ince4b") self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
ince4c = self.inception(ince4b, 512, 128, 128, 256, 24, 64, 64, self._fc_o1 = Linear(
"ince4c") 1152,
ince4d = self.inception(ince4c, 512, 112, 144, 288, 32, 64, 64, 1024,
"ince4d") param_attr=xavier(2048, 1, "fc_o1"),
ince4e = self.inception(ince4d, 528, 256, 160, 320, 32, 128, 128, bias_attr=ParamAttr(name="fc_o1_offset"),
"ince4e") act="relu")
pool4 = fluid.layers.pool2d( self._drop_o1 = fluid.dygraph.Dropout(p=0.7)
input=ince4e, pool_size=3, pool_type='max', pool_stride=2) self._out1 = Linear(
1024,
ince5a = self.inception(pool4, 832, 256, 160, 320, 32, 128, 128, class_dim,
"ince5a") param_attr=xavier(1024, 1, "out1"),
ince5b = self.inception(ince5a, 832, 384, 192, 384, 48, 128, 128, bias_attr=ParamAttr(name="out1_offset"),
"ince5b") act="softmax")
pool5 = fluid.layers.pool2d( self._pool_o2 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg')
input=ince5b, pool_size=7, pool_type='avg', pool_stride=7) self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
dropout = fluid.layers.dropout(x=pool5, dropout_prob=0.4) self._fc_o2 = Linear(
out = fluid.layers.fc(input=dropout, 1152,
size=class_dim, 1024,
act='softmax', param_attr=xavier(2048, 1, "fc_o2"),
param_attr=self.xavier(1024, 1, "out"),
name="out",
bias_attr=ParamAttr(name="out_offset"))
pool_o1 = fluid.layers.pool2d(
input=ince4a, pool_size=5, pool_type='avg', pool_stride=3)
conv_o1 = self.conv_layer(
input=pool_o1,
num_filters=128,
filter_size=1,
stride=1,
act=None,
name="conv_o1")
fc_o1 = fluid.layers.fc(input=conv_o1,
size=1024,
act='relu',
param_attr=self.xavier(2048, 1, "fc_o1"),
name="fc_o1",
bias_attr=ParamAttr(name="fc_o1_offset"))
dropout_o1 = fluid.layers.dropout(x=fc_o1, dropout_prob=0.7)
out1 = fluid.layers.fc(input=dropout_o1,
size=class_dim,
act='softmax',
param_attr=self.xavier(1024, 1, "out1"),
name="out1",
bias_attr=ParamAttr(name="out1_offset"))
pool_o2 = fluid.layers.pool2d(
input=ince4d, pool_size=5, pool_type='avg', pool_stride=3)
conv_o2 = self.conv_layer(
input=pool_o2,
num_filters=128,
filter_size=1,
stride=1,
act=None,
name="conv_o2")
fc_o2 = fluid.layers.fc(input=conv_o2,
size=1024,
act='relu',
param_attr=self.xavier(2048, 1, "fc_o2"),
name="fc_o2",
bias_attr=ParamAttr(name="fc_o2_offset")) bias_attr=ParamAttr(name="fc_o2_offset"))
dropout_o2 = fluid.layers.dropout(x=fc_o2, dropout_prob=0.7) self._drop_o2 = fluid.dygraph.Dropout(p=0.7)
out2 = fluid.layers.fc(input=dropout_o2, self._out2 = Linear(
size=class_dim, 1024,
act='softmax', class_dim,
param_attr=self.xavier(1024, 1, "out2"), param_attr=xavier(1024, 1, "out2"),
name="out2",
bias_attr=ParamAttr(name="out2_offset")) bias_attr=ParamAttr(name="out2_offset"))
def forward(self, inputs):
    """GoogLeNet forward pass.

    Returns:
        [out, out1, out2]: the main softmax head plus the two auxiliary
        heads fed from the ince4a and ince4d outputs (kept for training).
    """
    # Stem: conv1 -> maxpool -> conv2 (1x1, 3x3) -> maxpool.
    # self._pool is a parameter-free stride-2 max pool, so reusing the
    # same layer object at several points is safe.
    x = self._pool(self._conv(inputs))
    x = self._conv_2(self._conv_1(x))
    x = self._pool(x)

    x = self._ince3b(self._ince3a(x))
    x = self._pool(x)

    ince4a = self._ince4a(x)           # tapped for auxiliary head 1
    x = self._ince4c(self._ince4b(ince4a))
    ince4d = self._ince4d(x)           # tapped for auxiliary head 2
    x = self._pool(self._ince4e(ince4d))

    ince5b = self._ince5b(self._ince5a(x))

    # Main head: 7x7 avg pool -> dropout -> squeeze 1x1 dims -> fc.
    x = self._drop(self._pool_5(ince5b))
    x = fluid.layers.squeeze(x, axes=[2, 3])
    out = self._fc_out(x)

    # Auxiliary head 1 (from ince4a).
    x = self._conv_o1(self._pool_o1(ince4a))
    x = self._fc_o1(fluid.layers.flatten(x))
    out1 = self._out1(self._drop_o1(x))

    # Auxiliary head 2 (from ince4d).
    x = self._conv_o2(self._pool_o2(ince4d))
    x = self._fc_o2(fluid.layers.flatten(x))
    out2 = self._out2(self._drop_o2(x))

    return [out, out1, out2]
def GoogLeNet(**args):
    """Build the dygraph GoogLeNet model; keyword args (e.g. class_dim) are forwarded."""
    return GoogleNetDY(**args)
\ No newline at end of file
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
__all__ = [ __all__ = ["ResNeXt101_32x8d_wsl",
"ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl", "ResNeXt101_32x32d_wsl", "ResNeXt101_wsl_32x16d_wsl",
"ResNeXt101_32x48d_wsl", "Fix_ResNeXt101_32x48d_wsl" "ResNeXt101_wsl_32x32d_wsl",
] "ResNeXt101_wsl_32x48d_wsl"]
class ResNeXt101_wsl(): class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self, layers=101, cardinality=32, width=48): def __init__(self,
self.layers = layers input_channels,
self.cardinality = cardinality output_channels,
self.width = width
def net(self, input, class_dim=1000):
layers = self.layers
cardinality = self.cardinality
width = self.width
depth = [3, 4, 23, 3]
base_width = cardinality * width
num_filters = [base_width * i for i in [1, 2, 4, 8]]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu',
name="conv1") #debug
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv_name = 'layer' + str(block + 1) + "." + str(i)
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
name=conv_name)
pool = fluid.layers.pool2d(
input=conv, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(
input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name='fc.weight'),
bias_attr=fluid.param_attr.ParamAttr(name='fc.bias'))
return out
def conv_bn_layer(self,
input,
num_filters,
filter_size, filter_size,
stride=1, stride=1,
groups=1, groups=1,
act=None, act=None,
name=None): name=None):
super(ConvBNLayer, self).__init__()
if "downsample" in name: if "downsample" in name:
conv_name = name + '.0' conv_name = name + ".0"
else: else:
conv_name = name conv_name = name
conv = fluid.layers.conv2d( self._conv = Conv2D(num_channels=input_channels,
input=input, num_filters=output_channels,
num_filters=num_filters,
filter_size=filter_size, filter_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size-1)//2,
groups=groups, groups=groups,
act=None, act=None,
param_attr=ParamAttr(name=conv_name + ".weight"), param_attr=ParamAttr(name=conv_name + ".weight"),
bias_attr=False) bias_attr=False)
if "downsample" in name: if "downsample" in name:
bn_name = name[:9] + 'downsample' + '.1' bn_name = name[:9] + "downsample.1"
else: else:
if "conv1" == name: if "conv1" == name:
bn_name = 'bn' + name[-1] bn_name = "bn" + name[-1]
else: else:
bn_name = (name[:10] if name[7:9].isdigit() else name[:9] bn_name = (name[:10] if name[7:9].isdigit() else name[:9]) + "bn" + name[-1]
) + 'bn' + name[-1] self._bn = BatchNorm(num_channels=output_channels,
return fluid.layers.batch_norm(
input=conv,
act=act, act=act,
param_attr=ParamAttr(name=bn_name + '.weight'), param_attr=ParamAttr(name=bn_name + ".weight"),
bias_attr=ParamAttr(bn_name + '.bias'), bias_attr=ParamAttr(name=bn_name + ".bias"),
moving_mean_name=bn_name + '.running_mean', moving_mean_name=bn_name + ".running_mean",
moving_variance_name=bn_name + '.running_var', ) moving_variance_name=bn_name + ".running_var")
def shortcut(self, input, ch_out, stride, name): def forward(self, inputs):
ch_in = input.shape[1] x = self._conv(inputs)
if ch_in != ch_out or stride != 1: x = self._bn(x)
return self.conv_bn_layer(input, ch_out, 1, stride, name=name) return x
else:
return input class ShortCut(fluid.dygraph.Layer):
def __init__(self, input_channels, output_channels, stride, name=None):
def bottleneck_block(self, input, num_filters, stride, cardinality, name): super(ShortCut, self).__init__()
cardinality = self.cardinality
width = self.width self.input_channels = input_channels
conv0 = self.conv_bn_layer( self.output_channels = output_channels
input=input, self.stride = stride
num_filters=num_filters, if input_channels!=output_channels or stride!=1:
filter_size=1, self._conv = ConvBNLayer(
act='relu', input_channels, output_channels, filter_size=1, stride=stride, name=name)
name=name + ".conv1")
conv1 = self.conv_bn_layer( def forward(self, inputs):
input=conv0, if self.input_channels!= self.output_channels or self.stride!=1:
num_filters=num_filters, return self._conv(inputs)
filter_size=3, return inputs
stride=stride,
groups=cardinality, class BottleneckBlock(fluid.dygraph.Layer):
act='relu', def __init__(self, input_channels, output_channels, stride, cardinality, width, name):
name=name + ".conv2") super(BottleneckBlock, self).__init__()
conv2 = self.conv_bn_layer(
input=conv1, self._conv0 = ConvBNLayer(
num_filters=num_filters // (width // 8), input_channels, output_channels, filter_size=1, act="relu", name=name + ".conv1")
filter_size=1, self._conv1 = ConvBNLayer(
act=None, output_channels, output_channels, filter_size=3, act="relu", stride=stride, groups=cardinality, name=name + ".conv2")
name=name + ".conv3") self._conv2 = ConvBNLayer(
output_channels, output_channels//(width//8), filter_size=1, act=None, name=name + ".conv3")
short = self.shortcut( self._short = ShortCut(
input, input_channels, output_channels//(width//8), stride=stride, name=name + ".downsample")
num_filters // (width // 8),
stride, def forward(self, inputs):
name=name + ".downsample") x = self._conv0(inputs)
x = self._conv1(x)
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu') x = self._conv2(x)
y = self._short(inputs)
return fluid.layers.elementwise_add(x, y, act="relu")
class ResNeXt101WSL(fluid.dygraph.Layer):
    """ResNeXt-101 (weakly-supervised pre-training variant) in dygraph mode.

    The four stages contain [3, 4, 23, 3] bottleneck blocks; the first block
    of stages 2-4 downsamples with stride 2.

    Args:
        layers: kept for API compatibility; only the 101 depth is built.
        cardinality: number of groups in the 3x3 grouped convolutions.
        width: bottleneck width multiplier (8, 16, 32 or 48 for the WSL family).
        class_dim: number of output classes.
    """

    def __init__(self, layers=101, cardinality=32, width=48, class_dim=1000):
        super(ResNeXt101WSL, self).__init__()
        self.class_dim = class_dim
        self.layers = layers
        self.cardinality = cardinality
        self.width = width
        self.scale = width // 8
        self.depth = [3, 4, 23, 3]
        self.base_width = cardinality * width
        num_filters = [self.base_width * i for i in [1, 2, 4, 8]]

        self._conv_stem = ConvBNLayer(
            3, 64, 7, stride=2, act="relu", name="conv1")
        self._pool = Pool2D(pool_size=3,
                            pool_stride=2,
                            pool_padding=1,
                            pool_type="max")

        # Build the 33 bottleneck blocks in a loop instead of one hand-written
        # attribute per block.  Parameter names are fixed by the explicit
        # `name` strings, so pretrained checkpoints still load unchanged.
        self._blocks = []
        in_channels = 64
        for stage, (blocks, out_channels) in enumerate(
                zip(self.depth, num_filters)):
            for i in range(blocks):
                block_name = "layer{}.{}".format(stage + 1, i)
                # Only the first block of stages 2-4 downsamples.
                stride = 2 if (i == 0 and stage > 0) else 1
                block = self.add_sublayer(
                    block_name,
                    BottleneckBlock(in_channels, out_channels, stride=stride,
                                    cardinality=self.cardinality,
                                    width=self.width, name=block_name))
                self._blocks.append(block)
                # Each block emits out_channels // (width // 8) channels.
                in_channels = out_channels // (width // 8)

        self._avg_pool = Pool2D(pool_type="avg", global_pooling=True)
        self._out = Linear(input_dim=num_filters[3] // (width // 8),
                           output_dim=class_dim,
                           param_attr=ParamAttr(name="fc.weight"),
                           bias_attr=ParamAttr(name="fc.bias"))

    def forward(self, inputs):
        x = self._conv_stem(inputs)
        x = self._pool(x)
        for block in self._blocks:
            x = block(x)
        x = self._avg_pool(x)
        # Drop the 1x1 spatial dims -> [N, features] for the FC head.
        x = fluid.layers.squeeze(x, axes=[2, 3])
        return self._out(x)
def ResNeXt101_32x8d_wsl(**args):
    """ResNeXt-101 32x8d WSL."""
    return ResNeXt101WSL(cardinality=32, width=8, **args)


def ResNeXt101_32x16d_wsl(**args):
    """ResNeXt-101 32x16d WSL."""
    return ResNeXt101WSL(cardinality=32, width=16, **args)


def ResNeXt101_32x32d_wsl(**args):
    """ResNeXt-101 32x32d WSL."""
    return ResNeXt101WSL(cardinality=32, width=32, **args)


def ResNeXt101_32x48d_wsl(**args):
    """ResNeXt-101 32x48d WSL."""
    return ResNeXt101WSL(cardinality=32, width=48, **args)
\ No newline at end of file
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
__all__ = ["SqueezeNet", "SqueezeNet1_0", "SqueezeNet1_1"] __all__ = ["SqueezeNet1_0", "SqueezeNet1_1"]
class SqueezeNet():
    """Legacy static-graph (declarative) SqueezeNet builder.

    Supports two variants: "1.0" (7x7 stem, max-pools after fire4 and fire8)
    and "1.1" (3x3 stem, max-pools after fire3 and fire5).
    """

    def __init__(self, version='1.0'):
        # Variant selector; validated in net().
        self.version = version

    def net(self, input, class_dim=1000):
        """Build the network on `input` and return the [N, class_dim] logits."""
        version = self.version
        assert version in ['1.0', '1.1'], \
            "supported version are {} but input version is {}".format(['1.0', '1.1'], version)
        if version == '1.0':
            # v1.0 stem: 7x7/2 conv with 96 filters, no padding.
            conv = fluid.layers.conv2d(
                input,
                num_filters=96,
                filter_size=7,
                stride=2,
                act='relu',
                param_attr=fluid.param_attr.ParamAttr(name="conv1_weights"),
                bias_attr=ParamAttr(name='conv1_offset'))
            conv = fluid.layers.pool2d(
                conv, pool_size=3, pool_stride=2, pool_type='max')
            conv = self.make_fire(conv, 16, 64, 64, name='fire2')
            conv = self.make_fire(conv, 16, 64, 64, name='fire3')
            conv = self.make_fire(conv, 32, 128, 128, name='fire4')
            conv = fluid.layers.pool2d(
                conv, pool_size=3, pool_stride=2, pool_type='max')
            conv = self.make_fire(conv, 32, 128, 128, name='fire5')
            conv = self.make_fire(conv, 48, 192, 192, name='fire6')
            conv = self.make_fire(conv, 48, 192, 192, name='fire7')
            conv = self.make_fire(conv, 64, 256, 256, name='fire8')
            conv = fluid.layers.pool2d(
                conv, pool_size=3, pool_stride=2, pool_type='max')
            conv = self.make_fire(conv, 64, 256, 256, name='fire9')
        else:
            # v1.1 stem: lighter 3x3/2 conv with 64 filters, padding 1.
            conv = fluid.layers.conv2d(
                input,
                num_filters=64,
                filter_size=3,
                stride=2,
                padding=1,
                act='relu',
                param_attr=fluid.param_attr.ParamAttr(name="conv1_weights"),
                bias_attr=ParamAttr(name='conv1_offset'))
            conv = fluid.layers.pool2d(
                conv, pool_size=3, pool_stride=2, pool_type='max')
            conv = self.make_fire(conv, 16, 64, 64, name='fire2')
            conv = self.make_fire(conv, 16, 64, 64, name='fire3')
            conv = fluid.layers.pool2d(
                conv, pool_size=3, pool_stride=2, pool_type='max')
            conv = self.make_fire(conv, 32, 128, 128, name='fire4')
            conv = self.make_fire(conv, 32, 128, 128, name='fire5')
            conv = fluid.layers.pool2d(
                conv, pool_size=3, pool_stride=2, pool_type='max')
            conv = self.make_fire(conv, 48, 192, 192, name='fire6')
            conv = self.make_fire(conv, 48, 192, 192, name='fire7')
            conv = self.make_fire(conv, 64, 256, 256, name='fire8')
            conv = self.make_fire(conv, 64, 256, 256, name='fire9')
        # Head: dropout, 1x1 conv to class_dim, global average pool, flatten.
        conv = fluid.layers.dropout(conv, dropout_prob=0.5)
        conv = fluid.layers.conv2d(
            conv,
            num_filters=class_dim,
            filter_size=1,
            act='relu',
            param_attr=fluid.param_attr.ParamAttr(name="conv10_weights"),
            bias_attr=ParamAttr(name='conv10_offset'))
        conv = fluid.layers.pool2d(conv, pool_type='avg', global_pooling=True)
        out = fluid.layers.flatten(conv)
        return out
class MakeFireConv(fluid.dygraph.Layer):
    """Single conv + ReLU building block used inside a Fire module."""

    def __init__(self, input_channels, output_channels, filter_size,
                 padding=0, name=None):
        super(MakeFireConv, self).__init__()
        self._conv = Conv2D(
            input_channels, output_channels, filter_size,
            padding=padding,
            act="relu",
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=ParamAttr(name=name + "_offset"))

    def forward(self, inputs):
        return self._conv(inputs)
class MakeFire(fluid.dygraph.Layer):
    """SqueezeNet Fire module: 1x1 squeeze, then parallel 1x1 and 3x3 expands."""

    def __init__(self, input_channels, squeeze_channels,
                 expand1x1_channels, expand3x3_channels, name=None):
        super(MakeFire, self).__init__()
        self._conv = MakeFireConv(
            input_channels, squeeze_channels, 1, name=name + "_squeeze1x1")
        self._conv_path1 = MakeFireConv(
            squeeze_channels, expand1x1_channels, 1, name=name + "_expand1x1")
        self._conv_path2 = MakeFireConv(
            squeeze_channels, expand3x3_channels, 3, padding=1,
            name=name + "_expand3x3")

    def forward(self, inputs):
        squeezed = self._conv(inputs)
        left = self._conv_path1(squeezed)
        right = self._conv_path2(squeezed)
        # Concatenate the two expand paths along the channel axis.
        return fluid.layers.concat([left, right], axis=1)
class SqueezeNet(fluid.dygraph.Layer):
    """SqueezeNet classifier in dygraph mode.

    Args:
        version: "1.0" (7x7 stem, 96 filters) or "1.1" (3x3 stem, 64 filters).
        class_dim: number of output classes.
    """

    def __init__(self, version, class_dim=1000):
        super(SqueezeNet, self).__init__()
        # Validate eagerly: previously any value other than "1.0" silently
        # fell into the 1.1 branch (the legacy static-graph class asserted).
        assert version in ["1.0", "1.1"], \
            "supported version are ['1.0', '1.1'] but input version is {}".format(version)
        self.version = version

        if self.version == "1.0":
            # v1.0 stem: 7x7/2 conv with 96 filters, no padding.
            self._conv = Conv2D(3, 96, 7,
                                stride=2,
                                act="relu",
                                param_attr=ParamAttr(name="conv1_weights"),
                                bias_attr=ParamAttr(name="conv1_offset"))
        else:
            # v1.1 stem: lighter 3x3/2 conv with 64 filters, padding 1.
            self._conv = Conv2D(3, 64, 3,
                                stride=2,
                                padding=1,
                                act="relu",
                                param_attr=ParamAttr(name="conv1_weights"),
                                bias_attr=ParamAttr(name="conv1_offset"))
        self._pool = Pool2D(pool_size=3, pool_stride=2, pool_type="max")

        # The fire modules are identical for both versions except for fire2's
        # input channels; only the max-pool positions differ (see forward()).
        # Previously this whole list was duplicated in both branches.
        stem_channels = 96 if self.version == "1.0" else 64
        self._conv1 = MakeFire(stem_channels, 16, 64, 64, name="fire2")
        self._conv2 = MakeFire(128, 16, 64, 64, name="fire3")
        self._conv3 = MakeFire(128, 32, 128, 128, name="fire4")
        self._conv4 = MakeFire(256, 32, 128, 128, name="fire5")
        self._conv5 = MakeFire(256, 48, 192, 192, name="fire6")
        self._conv6 = MakeFire(384, 48, 192, 192, name="fire7")
        self._conv7 = MakeFire(384, 64, 256, 256, name="fire8")
        self._conv8 = MakeFire(512, 64, 256, 256, name="fire9")

        self._drop = Dropout(p=0.5)
        self._conv9 = Conv2D(512, class_dim, 1,
                             act="relu",
                             param_attr=ParamAttr(name="conv10_weights"),
                             bias_attr=ParamAttr(name="conv10_offset"))
        self._avg_pool = Pool2D(pool_type="avg", global_pooling=True)

    def forward(self, inputs):
        x = self._conv(inputs)
        x = self._pool(x)
        if self.version == "1.0":
            x = self._conv1(x)
            x = self._conv2(x)
            x = self._conv3(x)
            x = self._pool(x)
            x = self._conv4(x)
            x = self._conv5(x)
            x = self._conv6(x)
            x = self._conv7(x)
            x = self._pool(x)
            x = self._conv8(x)
        else:
            x = self._conv1(x)
            x = self._conv2(x)
            x = self._pool(x)
            x = self._conv3(x)
            x = self._conv4(x)
            x = self._pool(x)
            x = self._conv5(x)
            x = self._conv6(x)
            x = self._conv7(x)
            x = self._conv8(x)
        x = self._drop(x)
        x = self._conv9(x)
        x = self._avg_pool(x)
        # Drop the 1x1 spatial dims -> [N, class_dim].
        x = fluid.layers.squeeze(x, axes=[2, 3])
        return x
def SqueezeNet1_0(**args):
    """SqueezeNet v1.0."""
    return SqueezeNet(version="1.0", **args)


def SqueezeNet1_1(**args):
    """SqueezeNet v1.1."""
    return SqueezeNet(version="1.1", **args)
\ No newline at end of file
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
__all__ = ["VGGNet", "VGG11", "VGG13", "VGG16", "VGG19"] from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
__all__ = ["VGG11", "VGG13", "VGG16", "VGG19"]
class ConvBlock(fluid.dygraph.Layer):
    """One VGG stage: `groups` stacked 3x3 same-padding ReLU convs, then 2x2 max-pool.

    Args:
        input_channels: channels of the incoming feature map.
        output_channels: channels produced by every conv in the stage.
        groups: number of stacked 3x3 convs (1-4 in the standard VGG configs).
        name: prefix for parameter names (e.g. "conv1_").
    """

    def __init__(self, input_channels, output_channels, groups, name=None):
        super(ConvBlock, self).__init__()
        self.groups = groups
        # Build the convs in a loop instead of four hard-coded `if groups == N`
        # branches; this also lifts the implicit groups <= 4 limit.  Parameter
        # names (name + "<i>_weights") are unchanged, so checkpoints still load.
        self._convs = []
        channels = input_channels
        for i in range(groups):
            conv = self.add_sublayer(
                "conv{}".format(i + 1),
                Conv2D(num_channels=channels,
                       num_filters=output_channels,
                       filter_size=3,
                       stride=1,
                       padding=1,
                       act="relu",
                       param_attr=ParamAttr(name=name + str(i + 1) + "_weights"),
                       bias_attr=False))
            self._convs.append(conv)
            channels = output_channels
        self._pool = Pool2D(pool_size=2, pool_type="max", pool_stride=2)

    def forward(self, inputs):
        x = inputs
        for conv in self._convs:
            x = conv(x)
        return self._pool(x)
class VGGNet(fluid.dygraph.Layer):
    """VGG backbone (11/13/16/19 layers) in dygraph mode.

    Args:
        layers: VGG depth; one of 11, 13, 16, 19.
        class_dim: number of output classes.
    """

    def __init__(self, layers=11, class_dim=1000):
        super(VGGNet, self).__init__()

        self.layers = layers
        # Number of 3x3 convs per stage, keyed by depth.
        self.vgg_configure = {11: [1, 1, 2, 2, 2],
                              13: [2, 2, 2, 2, 2],
                              16: [2, 2, 3, 3, 3],
                              19: [2, 2, 4, 4, 4]}
        # BUG FIX: the message previously referenced the bare name
        # `vgg_configure`, which raised NameError instead of AssertionError.
        assert self.layers in self.vgg_configure.keys(), \
            "supported layers are {} but input layer is {}".format(
                self.vgg_configure.keys(), layers)
        self.groups = self.vgg_configure[self.layers]

        self._conv_block_1 = ConvBlock(3, 64, self.groups[0], name="conv1_")
        self._conv_block_2 = ConvBlock(64, 128, self.groups[1], name="conv2_")
        self._conv_block_3 = ConvBlock(128, 256, self.groups[2], name="conv3_")
        self._conv_block_4 = ConvBlock(256, 512, self.groups[3], name="conv4_")
        self._conv_block_5 = ConvBlock(512, 512, self.groups[4], name="conv5_")

        self._drop = fluid.dygraph.Dropout(p=0.5)
        self._fc1 = Linear(input_dim=7 * 7 * 512,
                           output_dim=4096,
                           act="relu",
                           param_attr=ParamAttr(name="fc6_weights"),
                           bias_attr=ParamAttr(name="fc6_offset"))
        self._fc2 = Linear(input_dim=4096,
                           output_dim=4096,
                           act="relu",
                           param_attr=ParamAttr(name="fc7_weights"),
                           bias_attr=ParamAttr(name="fc7_offset"))
        self._out = Linear(input_dim=4096,
                           output_dim=class_dim,
                           param_attr=ParamAttr(name="fc8_weights"),
                           bias_attr=ParamAttr(name="fc8_offset"))

    def forward(self, inputs):
        x = self._conv_block_1(inputs)
        x = self._conv_block_2(x)
        x = self._conv_block_3(x)
        x = self._conv_block_4(x)
        x = self._conv_block_5(x)
        # BUG FIX: flatten(axis=0) collapsed the batch dim into the features
        # ([1, N*25088]), breaking fc6 for batch > 1; axis=1 keeps [N, 7*7*512].
        x = fluid.layers.flatten(x, axis=1)
        x = self._fc1(x)
        x = self._drop(x)
        x = self._fc2(x)
        x = self._drop(x)
        x = self._out(x)
        return x
def VGG11(**args):
    """VGG with 11 weight layers."""
    return VGGNet(layers=11, **args)


def VGG13(**args):
    """VGG with 13 weight layers."""
    return VGGNet(layers=13, **args)


def VGG16(**args):
    """VGG with 16 weight layers."""
    return VGGNet(layers=16, **args)


def VGG19(**args):
    """VGG with 19 weight layers."""
    return VGGNet(layers=19, **args)
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册