Unverified commit ea746480, authored by littletomatodonkey, committed by GitHub

Merge pull request #275 from littletomatodonkey/dyg/adp-2.0b

Fix clas api to paddle2.0.0b0
...@@ -17,7 +17,7 @@ import imghdr ...@@ -17,7 +17,7 @@ import imghdr
import os import os
import signal import signal
from paddle.fluid.io import multiprocess_reader from paddle.reader import multiprocess_reader
from . import imaug from . import imaug
from .imaug import transform from .imaug import transform
......
...@@ -23,7 +23,7 @@ from .se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd, SE_Res ...@@ -23,7 +23,7 @@ from .se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd, SE_Res
from .se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet154_vd from .se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet154_vd
from .dpn import DPN68 from .dpn import DPN68
from .densenet import DenseNet121 from .densenet import DenseNet121
from .hrnet import HRNet_W18_C from .hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W18_C, SE_HRNet_W30_C, SE_HRNet_W32_C, SE_HRNet_W40_C, SE_HRNet_W44_C, SE_HRNet_W48_C, SE_HRNet_W60_C, SE_HRNet_W64_C
from .efficientnet import EfficientNetB0 from .efficientnet import EfficientNetB0
from .resnest import ResNeSt50_fast_1s1x64d, ResNeSt50 from .resnest import ResNeSt50_fast_1s1x64d, ResNeSt50
from .googlenet import GoogLeNet from .googlenet import GoogLeNet
...@@ -31,5 +31,14 @@ from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75 ...@@ -31,5 +31,14 @@ from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75
from .mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0 from .mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0
from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25 from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25
from .shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish from .shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish
from .alexnet import AlexNet
from .inception_v4 import InceptionV4
from .xception import Xception41, Xception65, Xception71
from .xception_deeplab import Xception41_deeplab, Xception65_deeplab, Xception71_deeplab
from .resnext101_wsl import ResNeXt101_32x8d_wsl, ResNeXt101_32x16d_wsl, ResNeXt101_32x32d_wsl, ResNeXt101_32x48d_wsl
from .shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish
from .squeezenet import SqueezeNet1_0, SqueezeNet1_1
from .vgg import VGG11, VGG13, VGG16, VGG19
from .darknet import DarkNet53
from .distillation_models import ResNet50_vd_distill_MobileNetV3_large_x1_0 from .distillation_models import ResNet50_vd_distill_MobileNetV3_large_x1_0
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout, ReLU
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
__all__ = ["AlexNet"] __all__ = ["AlexNet"]
class ConvPoolLayer(fluid.dygraph.Layer):
class ConvPoolLayer(nn.Layer):
def __init__(self, def __init__(self,
inputc_channels, input_channels,
output_channels, output_channels,
filter_size, filter_size,
stride, stride,
...@@ -19,85 +23,110 @@ class ConvPoolLayer(fluid.dygraph.Layer): ...@@ -19,85 +23,110 @@ class ConvPoolLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvPoolLayer, self).__init__() super(ConvPoolLayer, self).__init__()
self._conv = Conv2D(num_channels=inputc_channels, self.relu = ReLU() if act == "relu" else None
num_filters=output_channels,
filter_size=filter_size, self._conv = Conv2d(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
groups=groups, groups=groups,
param_attr=ParamAttr(name=name + "_weights", weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)), name=name + "_weights", initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name=name + "_offset", bias_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)), name=name + "_offset", initializer=Uniform(-stdv, stdv)))
act=act) self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)
self._pool = Pool2D(pool_size=3,
pool_stride=2,
pool_padding=0,
pool_type="max")
def forward(self, inputs): def forward(self, inputs):
x = self._conv(inputs) x = self._conv(inputs)
if self.relu is not None:
x = self.relu(x)
x = self._pool(x) x = self._pool(x)
return x return x
class AlexNetDY(fluid.dygraph.Layer): class AlexNetDY(nn.Layer):
def __init__(self, class_dim=1000): def __init__(self, class_dim=1000):
super(AlexNetDY, self).__init__() super(AlexNetDY, self).__init__()
stdv = 1.0/math.sqrt(3*11*11) stdv = 1.0 / math.sqrt(3 * 11 * 11)
self._conv1 = ConvPoolLayer( self._conv1 = ConvPoolLayer(
3, 64, 11, 4, 2, stdv, act="relu", name="conv1") 3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
stdv = 1.0/math.sqrt(64*5*5) stdv = 1.0 / math.sqrt(64 * 5 * 5)
self._conv2 = ConvPoolLayer( self._conv2 = ConvPoolLayer(
64, 192, 5, 1, 2, stdv, act="relu", name="conv2") 64, 192, 5, 1, 2, stdv, act="relu", name="conv2")
stdv = 1.0/math.sqrt(192*3*3) stdv = 1.0 / math.sqrt(192 * 3 * 3)
self._conv3 = Conv2D(192, 384, 3, stride=1, padding=1, self._conv3 = Conv2d(
param_attr=ParamAttr(name="conv3_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)), 192,
bias_attr=ParamAttr(name="conv3_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)), 384,
act="relu") 3,
stdv = 1.0/math.sqrt(384*3*3) stride=1,
self._conv4 = Conv2D(384, 256, 3, stride=1, padding=1, padding=1,
param_attr=ParamAttr(name="conv4_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)), weight_attr=ParamAttr(
bias_attr=ParamAttr(name="conv4_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)), name="conv3_weights", initializer=Uniform(-stdv, stdv)),
act="relu") bias_attr=ParamAttr(
stdv = 1.0/math.sqrt(256*3*3) name="conv3_offset", initializer=Uniform(-stdv, stdv)))
stdv = 1.0 / math.sqrt(384 * 3 * 3)
self._conv4 = Conv2d(
384,
256,
3,
stride=1,
padding=1,
weight_attr=ParamAttr(
name="conv4_weights", initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(
name="conv4_offset", initializer=Uniform(-stdv, stdv)))
stdv = 1.0 / math.sqrt(256 * 3 * 3)
self._conv5 = ConvPoolLayer( self._conv5 = ConvPoolLayer(
256, 256, 3, 1, 1, stdv, act="relu", name="conv5") 256, 256, 3, 1, 1, stdv, act="relu", name="conv5")
stdv = 1.0/math.sqrt(256*6*6) stdv = 1.0 / math.sqrt(256 * 6 * 6)
self._drop1 = Dropout(p=0.5) self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
self._fc6 = Linear(input_dim=256*6*6, self._fc6 = Linear(
output_dim=4096, in_features=256 * 6 * 6,
param_attr=ParamAttr(name="fc6_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)), out_features=4096,
bias_attr=ParamAttr(name="fc6_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)), weight_attr=ParamAttr(
act="relu") name="fc6_weights", initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(
name="fc6_offset", initializer=Uniform(-stdv, stdv)))
self._drop2 = Dropout(p=0.5) self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
self._fc7 = Linear(input_dim=4096, self._fc7 = Linear(
output_dim=4096, in_features=4096,
param_attr=ParamAttr(name="fc7_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)), out_features=4096,
bias_attr=ParamAttr(name="fc7_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)), weight_attr=ParamAttr(
act="relu") name="fc7_weights", initializer=Uniform(-stdv, stdv)),
self._fc8 = Linear(input_dim=4096, bias_attr=ParamAttr(
output_dim=class_dim, name="fc7_offset", initializer=Uniform(-stdv, stdv)))
param_attr=ParamAttr(name="fc8_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)), self._fc8 = Linear(
bias_attr=ParamAttr(name="fc8_offset", initializer=fluid.initializer.Uniform(-stdv, stdv))) in_features=4096,
out_features=class_dim,
weight_attr=ParamAttr(
name="fc8_weights", initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(
name="fc8_offset", initializer=Uniform(-stdv, stdv)))
def forward(self, inputs): def forward(self, inputs):
x = self._conv1(inputs) x = self._conv1(inputs)
x = self._conv2(x) x = self._conv2(x)
x = self._conv3(x) x = self._conv3(x)
x = F.relu(x)
x = self._conv4(x) x = self._conv4(x)
x = F.relu(x)
x = self._conv5(x) x = self._conv5(x)
x = fluid.layers.flatten(x, axis=0) x = paddle.flatten(x, start_axis=1, stop_axis=-1)
x = self._drop1(x) x = self._drop1(x)
x = self._fc6(x) x = self._fc6(x)
x = F.relu(x)
x = self._drop2(x) x = self._drop2(x)
x = self._fc7(x) x = self._fc7(x)
x = F.relu(x)
x = self._fc8(x) x = self._fc8(x)
return x return x
def AlexNet(**args): def AlexNet(**args):
model = AlexNetDY(**args) model = AlexNetDY(**args)
return model return model
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
__all__ = [
"CSPResNet50_leaky", "CSPResNet50_mish", "CSPResNet101_leaky",
"CSPResNet101_mish"
]
class CSPResNet():
    """CSP (Cross Stage Partial) ResNet backbone, old-style PaddlePaddle
    static-graph builder.

    This class does not build layers in ``__init__``; call :meth:`net` with an
    input variable to construct the graph and obtain the classification output.
    NOTE(review): parameter names (``conv1``, ``res2a_branch2a``, ``fc_0.w_0``,
    ...) follow the PaddleClas pretrained-weight naming scheme — do not rename
    them, or pretrained checkpoints will fail to load.
    """

    def __init__(self, layers=50, act="leaky_relu"):
        # layers: network depth, one of 50 or 101 (validated in net()).
        # act: activation used throughout, "leaky_relu" or "mish".
        self.layers = layers
        self.act = act

    def net(self, input, class_dim=1000, data_format="NCHW"):
        """Build the CSPResNet graph on ``input`` and return the FC logits.

        Args:
            input: 4-D image tensor variable (layout given by data_format).
            class_dim: number of output classes for the final FC layer.
            data_format: "NCHW" or "NHWC" tensor layout.

        Returns:
            The fully-connected output variable of shape [N, class_dim].
        """
        layers = self.layers
        supported_layers = [50, 101]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        # Number of bottleneck blocks per stage. Fewer than plain ResNet:
        # the CSP split halves the work routed through the residual path.
        if layers == 50:
            depth = [3, 3, 5, 2]
        elif layers == 101:
            depth = [3, 3, 22, 2]
        num_filters = [64, 128, 256, 512]
        # Stem: 7x7 stride-2 conv followed by a 2x2 stride-2 max pool.
        conv = self.conv_bn_layer(
            input=input,
            num_filters=64,
            filter_size=7,
            stride=2,
            act=self.act,
            name="conv1",
            data_format=data_format)
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=2,
            pool_stride=2,
            pool_padding=0,
            pool_type='max',
            data_format=data_format)
        for block in range(len(depth)):
            # chr(97) == 'a': stage names are "res2a", "res3a", ...
            conv_name = "res" + str(block + 2) + chr(97)
            # Every stage except the first starts with a stride-2 downsample.
            if block != 0:
                conv = self.conv_bn_layer(
                    input=conv,
                    num_filters=num_filters[block],
                    filter_size=3,
                    stride=2,
                    act=self.act,
                    name=conv_name + "_downsample",
                    data_format=data_format)
            # split: CSP forks the feature map into a bypass (left) branch
            # and a residual-block (right) branch.
            left = conv
            right = conv
            if block == 0:
                ch = num_filters[block]
            else:
                ch = num_filters[block] * 2
            right = self.conv_bn_layer(
                input=right,
                num_filters=ch,
                filter_size=1,
                act=self.act,
                name=conv_name + "_right_first_route",
                data_format=data_format)
            for i in range(depth[block]):
                # conv_name is intentionally rebound here; after the loop it
                # holds the LAST block's name, which the route/transition
                # layers below reuse as their name prefix.
                conv_name = "res" + str(block + 2) + chr(97 + i)
                right = self.bottleneck_block(
                    input=right,
                    num_filters=num_filters[block],
                    stride=1,
                    name=conv_name,
                    data_format=data_format)
            # route: 1x1 transitions on both branches, then channel-concat
            # and a final 1x1 merge transition.
            left = self.conv_bn_layer(
                input=left,
                num_filters=num_filters[block] * 2,
                filter_size=1,
                act=self.act,
                name=conv_name + "_left_route",
                data_format=data_format)
            right = self.conv_bn_layer(
                input=right,
                num_filters=num_filters[block] * 2,
                filter_size=1,
                act=self.act,
                name=conv_name + "_right_route",
                data_format=data_format)
            conv = fluid.layers.concat([left, right], axis=1)
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=num_filters[block] * 2,
                filter_size=1,
                stride=1,
                act=self.act,
                name=conv_name + "_merged_transition",
                data_format=data_format)
        # Head: global average pool + uniformly-initialized FC classifier.
        pool = fluid.layers.pool2d(
            input=conv,
            pool_type='avg',
            global_pooling=True,
            data_format=data_format)
        # Classic fan-in uniform init bound: 1/sqrt(num_input_features).
        # NOTE(review): pool.shape[1] assumes NCHW here even though pooling
        # honours data_format — for NHWC the channel axis is -1; confirm.
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                name="fc_0.w_0",
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(name="fc_0.b_0"))
        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None,
                      data_format='NCHW'):
        """Conv2D (no bias) + BatchNorm, with the activation applied AFTER
        batch norm (conv and BN themselves are built with act=None)."""
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,  # "same" padding for odd kernels
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False,
            name=name + '.conv2d.output.1',
            data_format=data_format)
        # BN parameter naming convention: "conv1" -> "bn_conv1";
        # "res..." -> "bn" + suffix after the leading "res".
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        bn = fluid.layers.batch_norm(
            input=conv,
            act=None,
            name=bn_name + '.output.1',
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance',
            data_layout=data_format)
        # Post-BN activation; act=None leaves the output linear.
        if act == "relu":
            bn = fluid.layers.relu(bn)
        elif act == "leaky_relu":
            bn = fluid.layers.leaky_relu(bn)
        elif act == "mish":
            bn = self._mish(bn)
        return bn

    def _mish(self, input):
        """Mish activation: x * tanh(softplus(x))."""
        return input * fluid.layers.tanh(self._softplus(input))

    def _softplus(self, input):
        """Numerically-guarded softplus: log(1 + exp(clip(x, -200, 50))).

        The clip bounds exp() to avoid overflow for large activations.
        """
        expf = fluid.layers.exp(fluid.layers.clip(input, -200, 50))
        return fluid.layers.log(1 + expf)

    def shortcut(self, input, ch_out, stride, is_first, name, data_format):
        """Identity shortcut when shape already matches, otherwise a 1x1
        projection conv_bn_layer (linear, no activation)."""
        if data_format == 'NCHW':
            ch_in = input.shape[1]
        else:
            ch_in = input.shape[-1]
        if ch_in != ch_out or stride != 1 or is_first is True:
            return self.conv_bn_layer(
                input, ch_out, 1, stride, name=name, data_format=data_format)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride, name, data_format):
        """Standard 1x1 -> 3x3 -> 1x1 bottleneck (expansion 2x) with a
        leaky-ReLU after the residual addition.

        NOTE(review): internal activations are hard-coded to "leaky_relu"
        regardless of self.act — the mish variants only change conv_bn_layer
        activations outside the bottlenecks; confirm this matches upstream.
        """
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            act="leaky_relu",
            name=name + "_branch2a",
            data_format=data_format)
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act="leaky_relu",
            name=name + "_branch2b",
            data_format=data_format)
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 2,
            filter_size=1,
            act=None,
            name=name + "_branch2c",
            data_format=data_format)
        short = self.shortcut(
            input,
            num_filters * 2,
            stride,
            is_first=False,
            name=name + "_branch1",
            data_format=data_format)
        ret = short + conv2
        ret = fluid.layers.leaky_relu(ret, alpha=0.1)
        return ret
def CSPResNet50_leaky():
    """Build a CSPResNet-50 configured with leaky-ReLU activations."""
    return CSPResNet(layers=50, act="leaky_relu")
def CSPResNet50_mish():
    """Build a CSPResNet-50 configured with Mish activations."""
    return CSPResNet(layers=50, act="mish")
def CSPResNet101_leaky():
    """Build a CSPResNet-101 configured with leaky-ReLU activations."""
    return CSPResNet(layers=101, act="leaky_relu")
def CSPResNet101_mish():
    """Build a CSPResNet-101 configured with Mish activations."""
    return CSPResNet(layers=101, act="mish")
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
__all__ = ["DarkNet53"] __all__ = ["DarkNet53"]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -17,14 +20,13 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -17,14 +20,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=input_channels, in_channels=input_channels,
num_filters=output_channels, out_channels=output_channels,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
act=None, weight_attr=ParamAttr(name=name + ".conv.weights"),
param_attr=ParamAttr(name=name + ".conv.weights"),
bias_attr=False) bias_attr=False)
bn_name = name + ".bn" bn_name = name + ".bn"
...@@ -42,7 +44,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -42,7 +44,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return x return x
class BasicBlock(fluid.dygraph.Layer): class BasicBlock(nn.Layer):
def __init__(self, input_channels, output_channels, name=None): def __init__(self, input_channels, output_channels, name=None):
super(BasicBlock, self).__init__() super(BasicBlock, self).__init__()
...@@ -54,10 +56,10 @@ class BasicBlock(fluid.dygraph.Layer): ...@@ -54,10 +56,10 @@ class BasicBlock(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
x = self._conv1(inputs) x = self._conv1(inputs)
x = self._conv2(x) x = self._conv2(x)
return fluid.layers.elementwise_add(x=inputs, y=x) return paddle.elementwise_add(x=inputs, y=x)
class DarkNet(fluid.dygraph.Layer): class DarkNet(nn.Layer):
def __init__(self, class_dim=1000): def __init__(self, class_dim=1000):
super(DarkNet, self).__init__() super(DarkNet, self).__init__()
...@@ -102,15 +104,14 @@ class DarkNet(fluid.dygraph.Layer): ...@@ -102,15 +104,14 @@ class DarkNet(fluid.dygraph.Layer):
self._basic_block_43 = BasicBlock(1024, 512, name="stage.4.2") self._basic_block_43 = BasicBlock(1024, 512, name="stage.4.2")
self._basic_block_44 = BasicBlock(1024, 512, name="stage.4.3") self._basic_block_44 = BasicBlock(1024, 512, name="stage.4.3")
self._pool = Pool2D(pool_type="avg", global_pooling=True) self._pool = AdaptiveAvgPool2d(1)
stdv = 1.0 / math.sqrt(1024.0) stdv = 1.0 / math.sqrt(1024.0)
self._out = Linear( self._out = Linear(
input_dim=1024, 1024,
output_dim=class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
name="fc_weights", name="fc_weights", initializer=Uniform(-stdv, stdv)),
initializer=fluid.initializer.Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -150,7 +151,7 @@ class DarkNet(fluid.dygraph.Layer): ...@@ -150,7 +151,7 @@ class DarkNet(fluid.dygraph.Layer):
x = self._basic_block_44(x) x = self._basic_block_44(x)
x = self._pool(x) x = self._pool(x)
x = fluid.layers.squeeze(x, axes=[2, 3]) x = paddle.squeeze(x, axis=[2, 3])
x = self._out(x) x = self._out(x)
return x return x
......
This diff has been collapsed.
...@@ -18,9 +18,11 @@ from __future__ import print_function ...@@ -18,9 +18,11 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -29,7 +31,7 @@ __all__ = [ ...@@ -29,7 +31,7 @@ __all__ = [
] ]
class BNACConvLayer(fluid.dygraph.Layer): class BNACConvLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -49,15 +51,14 @@ class BNACConvLayer(fluid.dygraph.Layer): ...@@ -49,15 +51,14 @@ class BNACConvLayer(fluid.dygraph.Layer):
moving_mean_name=name + '_bn_mean', moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance') moving_variance_name=name + '_bn_variance')
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=pad, padding=pad,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
def forward(self, input): def forward(self, input):
...@@ -66,7 +67,7 @@ class BNACConvLayer(fluid.dygraph.Layer): ...@@ -66,7 +67,7 @@ class BNACConvLayer(fluid.dygraph.Layer):
return y return y
class DenseLayer(fluid.dygraph.Layer): class DenseLayer(nn.Layer):
def __init__(self, num_channels, growth_rate, bn_size, dropout, name=None): def __init__(self, num_channels, growth_rate, bn_size, dropout, name=None):
super(DenseLayer, self).__init__() super(DenseLayer, self).__init__()
self.dropout = dropout self.dropout = dropout
...@@ -88,18 +89,18 @@ class DenseLayer(fluid.dygraph.Layer): ...@@ -88,18 +89,18 @@ class DenseLayer(fluid.dygraph.Layer):
name=name + "_x2") name=name + "_x2")
if dropout: if dropout:
self.dropout_func = Dropout(p=dropout) self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer")
def forward(self, input): def forward(self, input):
conv = self.bn_ac_func1(input) conv = self.bn_ac_func1(input)
conv = self.bn_ac_func2(conv) conv = self.bn_ac_func2(conv)
if self.dropout: if self.dropout:
conv = self.dropout_func(conv) conv = self.dropout_func(conv)
conv = fluid.layers.concat([input, conv], axis=1) conv = paddle.concat([input, conv], axis=1)
return conv return conv
class DenseBlock(fluid.dygraph.Layer): class DenseBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_layers, num_layers,
...@@ -132,7 +133,7 @@ class DenseBlock(fluid.dygraph.Layer): ...@@ -132,7 +133,7 @@ class DenseBlock(fluid.dygraph.Layer):
return conv return conv
class TransitionLayer(fluid.dygraph.Layer): class TransitionLayer(nn.Layer):
def __init__(self, num_channels, num_output_features, name=None): def __init__(self, num_channels, num_output_features, name=None):
super(TransitionLayer, self).__init__() super(TransitionLayer, self).__init__()
...@@ -144,7 +145,7 @@ class TransitionLayer(fluid.dygraph.Layer): ...@@ -144,7 +145,7 @@ class TransitionLayer(fluid.dygraph.Layer):
stride=1, stride=1,
name=name) name=name)
self.pool2d_avg = Pool2D(pool_size=2, pool_stride=2, pool_type='avg') self.pool2d_avg = AvgPool2d(kernel_size=2, stride=2, padding=0)
def forward(self, input): def forward(self, input):
y = self.conv_ac_func(input) y = self.conv_ac_func(input)
...@@ -152,7 +153,7 @@ class TransitionLayer(fluid.dygraph.Layer): ...@@ -152,7 +153,7 @@ class TransitionLayer(fluid.dygraph.Layer):
return y return y
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -164,15 +165,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -164,15 +165,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=pad, padding=pad,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
self._batch_norm = BatchNorm( self._batch_norm = BatchNorm(
num_filters, num_filters,
...@@ -188,7 +188,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -188,7 +188,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class DenseNet(fluid.dygraph.Layer): class DenseNet(nn.Layer):
def __init__(self, layers=60, bn_size=4, dropout=0, class_dim=1000): def __init__(self, layers=60, bn_size=4, dropout=0, class_dim=1000):
super(DenseNet, self).__init__() super(DenseNet, self).__init__()
...@@ -214,8 +214,7 @@ class DenseNet(fluid.dygraph.Layer): ...@@ -214,8 +214,7 @@ class DenseNet(fluid.dygraph.Layer):
act='relu', act='relu',
name="conv1") name="conv1")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_config = block_config self.block_config = block_config
...@@ -257,16 +256,15 @@ class DenseNet(fluid.dygraph.Layer): ...@@ -257,16 +256,15 @@ class DenseNet(fluid.dygraph.Layer):
moving_mean_name='conv5_blk_bn_mean', moving_mean_name='conv5_blk_bn_mean',
moving_variance_name='conv5_blk_bn_variance') moving_variance_name='conv5_blk_bn_variance')
self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) self.pool2d_avg = AdaptiveAvgPool2d(1)
stdv = 1.0 / math.sqrt(num_features * 1.0) stdv = 1.0 / math.sqrt(num_features * 1.0)
self.out = Linear( self.out = Linear(
num_features, num_features,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_weights"),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, input): def forward(self, input):
...@@ -280,7 +278,7 @@ class DenseNet(fluid.dygraph.Layer): ...@@ -280,7 +278,7 @@ class DenseNet(fluid.dygraph.Layer):
conv = self.batch_norm(conv) conv = self.batch_norm(conv)
y = self.pool2d_avg(conv) y = self.pool2d_avg(conv)
y = fluid.layers.reshape(y, shape=[0, -1]) y = paddle.reshape(y, shape=[0, -1])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -19,8 +19,7 @@ from __future__ import print_function ...@@ -19,8 +19,7 @@ from __future__ import print_function
import math import math
import paddle import paddle
import paddle.fluid as fluid import paddle.nn as nn
from paddle.fluid.param_attr import ParamAttr
from .resnet_vd import ResNet50_vd from .resnet_vd import ResNet50_vd
from .mobilenet_v3 import MobileNetV3_large_x1_0 from .mobilenet_v3 import MobileNetV3_large_x1_0
...@@ -32,7 +31,7 @@ __all__ = [ ...@@ -32,7 +31,7 @@ __all__ = [
] ]
class ResNet50_vd_distill_MobileNetV3_large_x1_0(fluid.dygraph.Layer): class ResNet50_vd_distill_MobileNetV3_large_x1_0(nn.Layer):
def __init__(self, class_dim=1000, **args): def __init__(self, class_dim=1000, **args):
super(ResNet50_vd_distill_MobileNetV3_large_x1_0, self).__init__() super(ResNet50_vd_distill_MobileNetV3_large_x1_0, self).__init__()
...@@ -49,7 +48,7 @@ class ResNet50_vd_distill_MobileNetV3_large_x1_0(fluid.dygraph.Layer): ...@@ -49,7 +48,7 @@ class ResNet50_vd_distill_MobileNetV3_large_x1_0(fluid.dygraph.Layer):
return teacher_label, student_label return teacher_label, student_label
class ResNeXt101_32x16d_wsl_distill_ResNet50_vd(fluid.dygraph.Layer): class ResNeXt101_32x16d_wsl_distill_ResNet50_vd(nn.Layer):
def __init__(self, class_dim=1000, **args): def __init__(self, class_dim=1000, **args):
super(ResNet50_vd_distill_MobileNetV3_large_x1_0, self).__init__() super(ResNet50_vd_distill_MobileNetV3_large_x1_0, self).__init__()
......
...@@ -19,9 +19,11 @@ from __future__ import print_function ...@@ -19,9 +19,11 @@ from __future__ import print_function
import numpy as np import numpy as np
import sys import sys
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear from paddle.nn import Conv2d, BatchNorm, Linear
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -35,7 +37,7 @@ __all__ = [ ...@@ -35,7 +37,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -47,15 +49,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -47,15 +49,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=pad, padding=pad,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
self._batch_norm = BatchNorm( self._batch_norm = BatchNorm(
num_filters, num_filters,
...@@ -71,7 +72,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -71,7 +72,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BNACConvLayer(fluid.dygraph.Layer): class BNACConvLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -83,7 +84,6 @@ class BNACConvLayer(fluid.dygraph.Layer): ...@@ -83,7 +84,6 @@ class BNACConvLayer(fluid.dygraph.Layer):
name=None): name=None):
super(BNACConvLayer, self).__init__() super(BNACConvLayer, self).__init__()
self.num_channels = num_channels self.num_channels = num_channels
self.name = name
self._batch_norm = BatchNorm( self._batch_norm = BatchNorm(
num_channels, num_channels,
...@@ -93,15 +93,14 @@ class BNACConvLayer(fluid.dygraph.Layer): ...@@ -93,15 +93,14 @@ class BNACConvLayer(fluid.dygraph.Layer):
moving_mean_name=name + '_bn_mean', moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance') moving_variance_name=name + '_bn_variance')
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=pad, padding=pad,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
def forward(self, input): def forward(self, input):
...@@ -110,7 +109,7 @@ class BNACConvLayer(fluid.dygraph.Layer): ...@@ -110,7 +109,7 @@ class BNACConvLayer(fluid.dygraph.Layer):
return y return y
class DualPathFactory(fluid.dygraph.Layer): class DualPathFactory(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_1x1_a, num_1x1_a,
...@@ -183,14 +182,14 @@ class DualPathFactory(fluid.dygraph.Layer): ...@@ -183,14 +182,14 @@ class DualPathFactory(fluid.dygraph.Layer):
def forward(self, input): def forward(self, input):
# PROJ # PROJ
if isinstance(input, list): if isinstance(input, list):
data_in = fluid.layers.concat([input[0], input[1]], axis=1) data_in = paddle.concat([input[0], input[1]], axis=1)
else: else:
data_in = input data_in = input
if self.has_proj: if self.has_proj:
c1x1_w = self.c1x1_w_func(data_in) c1x1_w = self.c1x1_w_func(data_in)
data_o1, data_o2 = fluid.layers.split( data_o1, data_o2 = paddle.split(
c1x1_w, num_or_sections=[self.num_1x1_c, 2 * self.inc], dim=1) c1x1_w, num_or_sections=[self.num_1x1_c, 2 * self.inc], axis=1)
else: else:
data_o1 = input[0] data_o1 = input[0]
data_o2 = input[1] data_o2 = input[1]
...@@ -199,17 +198,17 @@ class DualPathFactory(fluid.dygraph.Layer): ...@@ -199,17 +198,17 @@ class DualPathFactory(fluid.dygraph.Layer):
c3x3_b = self.c3x3_b_func(c1x1_a) c3x3_b = self.c3x3_b_func(c1x1_a)
c1x1_c = self.c1x1_c_func(c3x3_b) c1x1_c = self.c1x1_c_func(c3x3_b)
c1x1_c1, c1x1_c2 = fluid.layers.split( c1x1_c1, c1x1_c2 = paddle.split(
c1x1_c, num_or_sections=[self.num_1x1_c, self.inc], dim=1) c1x1_c, num_or_sections=[self.num_1x1_c, self.inc], axis=1)
# OUTPUTS # OUTPUTS
summ = fluid.layers.elementwise_add(x=data_o1, y=c1x1_c1) summ = paddle.elementwise_add(x=data_o1, y=c1x1_c1)
dense = fluid.layers.concat([data_o2, c1x1_c2], axis=1) dense = paddle.concat([data_o2, c1x1_c2], axis=1)
# tensor, channels # tensor, channels
return [summ, dense] return [summ, dense]
class DPN(fluid.dygraph.Layer): class DPN(nn.Layer):
def __init__(self, layers=60, class_dim=1000): def __init__(self, layers=60, class_dim=1000):
super(DPN, self).__init__() super(DPN, self).__init__()
...@@ -237,8 +236,7 @@ class DPN(fluid.dygraph.Layer): ...@@ -237,8 +236,7 @@ class DPN(fluid.dygraph.Layer):
act='relu', act='relu',
name="conv1") name="conv1")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
num_channel_dpn = init_num_filter num_channel_dpn = init_num_filter
...@@ -303,16 +301,15 @@ class DPN(fluid.dygraph.Layer): ...@@ -303,16 +301,15 @@ class DPN(fluid.dygraph.Layer):
moving_mean_name='final_concat_bn_mean', moving_mean_name='final_concat_bn_mean',
moving_variance_name='final_concat_bn_variance') moving_variance_name='final_concat_bn_variance')
self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) self.pool2d_avg = AdaptiveAvgPool2d(1)
stdv = 0.01 stdv = 0.01
self.out = Linear( self.out = Linear(
out_channel, out_channel,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_weights"),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, input): def forward(self, input):
...@@ -327,11 +324,11 @@ class DPN(fluid.dygraph.Layer): ...@@ -327,11 +324,11 @@ class DPN(fluid.dygraph.Layer):
convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x) convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x)
dpn_idx += 1 dpn_idx += 1
conv5_x_x = fluid.layers.concat(convX_x_x, axis=1) conv5_x_x = paddle.concat(convX_x_x, axis=1)
conv5_x_x = self.conv5_x_x_bn(conv5_x_x) conv5_x_x = self.conv5_x_x_bn(conv5_x_x)
y = self.pool2d_avg(conv5_x_x) y = self.pool2d_avg(conv5_x_x)
y = fluid.layers.reshape(y, shape=[0, -1]) y = paddle.reshape(y, shape=[0, -1])
y = self.out(y) y = self.out(y)
return y return y
......
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
import math import math
import collections import collections
import re import re
...@@ -242,15 +244,14 @@ def _drop_connect(inputs, prob, is_test): ...@@ -242,15 +244,14 @@ def _drop_connect(inputs, prob, is_test):
if is_test: if is_test:
return inputs return inputs
keep_prob = 1.0 - prob keep_prob = 1.0 - prob
inputs_shape = fluid.layers.shape(inputs) inputs_shape = paddle.shape(inputs)
random_tensor = keep_prob + fluid.layers.uniform_random( random_tensor = keep_prob + paddle.rand(shape=[inputs_shape[0], 1, 1, 1])
shape=[inputs_shape[0], 1, 1, 1], min=0., max=1.) binary_tensor = paddle.floor(random_tensor)
binary_tensor = fluid.layers.floor(random_tensor)
output = inputs / keep_prob * binary_tensor output = inputs / keep_prob * binary_tensor
return output return output
class Conv2ds(fluid.dygraph.Layer): class Conv2ds(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -265,6 +266,8 @@ class Conv2ds(fluid.dygraph.Layer): ...@@ -265,6 +266,8 @@ class Conv2ds(fluid.dygraph.Layer):
model_name=None, model_name=None,
cur_stage=None): cur_stage=None):
super(Conv2ds, self).__init__() super(Conv2ds, self).__init__()
assert act in [None, "swish", "sigmoid"]
self.act = act
param_attr, bias_attr = initial_type(name=name, use_bias=use_bias) param_attr, bias_attr = initial_type(name=name, use_bias=use_bias)
...@@ -296,25 +299,31 @@ class Conv2ds(fluid.dygraph.Layer): ...@@ -296,25 +299,31 @@ class Conv2ds(fluid.dygraph.Layer):
else: else:
padding = padding_type padding = padding_type
self._conv = Conv2D( groups = 1 if groups is None else groups
self._conv = Conv2d(
input_channels, input_channels,
output_channels, output_channels,
filter_size, filter_size,
groups=groups, groups=groups,
stride=stride, stride=stride,
act=act, # act=act,
padding=padding, padding=padding,
param_attr=param_attr, weight_attr=param_attr,
bias_attr=bias_attr) bias_attr=bias_attr)
def forward(self, inputs): def forward(self, inputs):
x = self._conv(inputs) x = self._conv(inputs)
if self.act == "swish":
x = F.swish(x)
elif self.act == "sigmoid":
x = F.sigmoid(x)
if self.need_crop: if self.need_crop:
x = x[:, :, 1:, 1:] x = x[:, :, 1:, 1:]
return x return x
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
filter_size, filter_size,
...@@ -369,7 +378,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -369,7 +378,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return self._conv(inputs) return self._conv(inputs)
class ExpandConvNorm(fluid.dygraph.Layer): class ExpandConvNorm(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
block_args, block_args,
...@@ -402,7 +411,7 @@ class ExpandConvNorm(fluid.dygraph.Layer): ...@@ -402,7 +411,7 @@ class ExpandConvNorm(fluid.dygraph.Layer):
return inputs return inputs
class DepthwiseConvNorm(fluid.dygraph.Layer): class DepthwiseConvNorm(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
block_args, block_args,
...@@ -436,7 +445,7 @@ class DepthwiseConvNorm(fluid.dygraph.Layer): ...@@ -436,7 +445,7 @@ class DepthwiseConvNorm(fluid.dygraph.Layer):
return self._conv(inputs) return self._conv(inputs)
class ProjectConvNorm(fluid.dygraph.Layer): class ProjectConvNorm(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
block_args, block_args,
...@@ -464,7 +473,7 @@ class ProjectConvNorm(fluid.dygraph.Layer): ...@@ -464,7 +473,7 @@ class ProjectConvNorm(fluid.dygraph.Layer):
return self._conv(inputs) return self._conv(inputs)
class SEBlock(fluid.dygraph.Layer): class SEBlock(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
num_squeezed_channels, num_squeezed_channels,
...@@ -475,8 +484,7 @@ class SEBlock(fluid.dygraph.Layer): ...@@ -475,8 +484,7 @@ class SEBlock(fluid.dygraph.Layer):
cur_stage=None): cur_stage=None):
super(SEBlock, self).__init__() super(SEBlock, self).__init__()
self._pool = Pool2D( self._pool = AdaptiveAvgPool2d(1)
pool_type="avg", global_pooling=True, use_cudnn=False)
self._conv1 = Conv2ds( self._conv1 = Conv2ds(
input_channels, input_channels,
num_squeezed_channels, num_squeezed_channels,
...@@ -499,10 +507,10 @@ class SEBlock(fluid.dygraph.Layer): ...@@ -499,10 +507,10 @@ class SEBlock(fluid.dygraph.Layer):
x = self._pool(inputs) x = self._pool(inputs)
x = self._conv1(x) x = self._conv1(x)
x = self._conv2(x) x = self._conv2(x)
return fluid.layers.elementwise_mul(inputs, x) return paddle.multiply(inputs, x)
class MbConvBlock(fluid.dygraph.Layer): class MbConvBlock(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
block_args, block_args,
...@@ -565,9 +573,9 @@ class MbConvBlock(fluid.dygraph.Layer): ...@@ -565,9 +573,9 @@ class MbConvBlock(fluid.dygraph.Layer):
x = inputs x = inputs
if self.expand_ratio != 1: if self.expand_ratio != 1:
x = self._ecn(x) x = self._ecn(x)
x = fluid.layers.swish(x) x = F.swish(x)
x = self._dcn(x) x = self._dcn(x)
x = fluid.layers.swish(x) x = F.swish(x)
if self.has_se: if self.has_se:
x = self._se(x) x = self._se(x)
x = self._pcn(x) x = self._pcn(x)
...@@ -576,11 +584,11 @@ class MbConvBlock(fluid.dygraph.Layer): ...@@ -576,11 +584,11 @@ class MbConvBlock(fluid.dygraph.Layer):
self.block_args.input_filters == self.block_args.output_filters: self.block_args.input_filters == self.block_args.output_filters:
if self.drop_connect_rate: if self.drop_connect_rate:
x = _drop_connect(x, self.drop_connect_rate, self.is_test) x = _drop_connect(x, self.drop_connect_rate, self.is_test)
x = fluid.layers.elementwise_add(x, inputs) x = paddle.elementwise_add(x, inputs)
return x return x
class ConvStemNorm(fluid.dygraph.Layer): class ConvStemNorm(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
padding_type, padding_type,
...@@ -608,7 +616,7 @@ class ConvStemNorm(fluid.dygraph.Layer): ...@@ -608,7 +616,7 @@ class ConvStemNorm(fluid.dygraph.Layer):
return self._conv(inputs) return self._conv(inputs)
class ExtractFeatures(fluid.dygraph.Layer): class ExtractFeatures(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
_block_args, _block_args,
...@@ -694,13 +702,13 @@ class ExtractFeatures(fluid.dygraph.Layer): ...@@ -694,13 +702,13 @@ class ExtractFeatures(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
x = self._conv_stem(inputs) x = self._conv_stem(inputs)
x = fluid.layers.swish(x) x = F.swish(x)
for _mc_block in self.conv_seq: for _mc_block in self.conv_seq:
x = _mc_block(x) x = _mc_block(x)
return x return x
class EfficientNet(fluid.dygraph.Layer): class EfficientNet(nn.Layer):
def __init__(self, def __init__(self,
name="b0", name="b0",
is_test=True, is_test=True,
...@@ -753,18 +761,17 @@ class EfficientNet(fluid.dygraph.Layer): ...@@ -753,18 +761,17 @@ class EfficientNet(fluid.dygraph.Layer):
bn_name="_bn1", bn_name="_bn1",
model_name=self.name, model_name=self.name,
cur_stage=7) cur_stage=7)
self._pool = Pool2D(pool_type="avg", global_pooling=True) self._pool = AdaptiveAvgPool2d(1)
if self._global_params.dropout_rate: if self._global_params.dropout_rate:
self._drop = Dropout( self._drop = Dropout(
p=self._global_params.dropout_rate, p=self._global_params.dropout_rate, mode="upscale_in_train")
dropout_implementation="upscale_in_train")
param_attr, bias_attr = init_fc_layer("_fc") param_attr, bias_attr = init_fc_layer("_fc")
self._fc = Linear( self._fc = Linear(
output_channels, output_channels,
class_dim, class_dim,
param_attr=param_attr, weight_attr=param_attr,
bias_attr=bias_attr) bias_attr=bias_attr)
def forward(self, inputs): def forward(self, inputs):
...@@ -773,7 +780,7 @@ class EfficientNet(fluid.dygraph.Layer): ...@@ -773,7 +780,7 @@ class EfficientNet(fluid.dygraph.Layer):
x = self._pool(x) x = self._pool(x)
if self._global_params.dropout_rate: if self._global_params.dropout_rate:
x = self._drop(x) x = self._drop(x)
x = fluid.layers.squeeze(x, axes=[2, 3]) x = paddle.squeeze(x, axis=[2, 3])
x = self._fc(x) x = self._fc(x)
return x return x
......
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
__all__ = ['GoogLeNet'] __all__ = ['GoogLeNet']
...@@ -10,12 +14,11 @@ __all__ = ['GoogLeNet'] ...@@ -10,12 +14,11 @@ __all__ = ['GoogLeNet']
def xavier(channels, filter_size, name): def xavier(channels, filter_size, name):
stdv = (3.0 / (filter_size**2 * channels))**0.5 stdv = (3.0 / (filter_size**2 * channels))**0.5
param_attr = ParamAttr( param_attr = ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name=name + "_weights")
name=name + "_weights")
return param_attr return param_attr
class ConvLayer(fluid.dygraph.Layer): class ConvLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -26,15 +29,14 @@ class ConvLayer(fluid.dygraph.Layer): ...@@ -26,15 +29,14 @@ class ConvLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvLayer, self).__init__() super(ConvLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
def forward(self, inputs): def forward(self, inputs):
...@@ -42,7 +44,7 @@ class ConvLayer(fluid.dygraph.Layer): ...@@ -42,7 +44,7 @@ class ConvLayer(fluid.dygraph.Layer):
return y return y
class Inception(fluid.dygraph.Layer): class Inception(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -71,8 +73,8 @@ class Inception(fluid.dygraph.Layer): ...@@ -71,8 +73,8 @@ class Inception(fluid.dygraph.Layer):
name="inception_" + name + "_5x5_reduce") name="inception_" + name + "_5x5_reduce")
self._conv5 = ConvLayer( self._conv5 = ConvLayer(
filter5R, filter5, 5, name="inception_" + name + "_5x5") filter5R, filter5, 5, name="inception_" + name + "_5x5")
self._pool = Pool2D( self._pool = MaxPool2d(kernel_size=3, stride=1, padding=1)
pool_size=3, pool_type="max", pool_stride=1, pool_padding=1)
self._convprj = ConvLayer( self._convprj = ConvLayer(
input_channels, proj, 1, name="inception_" + name + "_3x3_proj") input_channels, proj, 1, name="inception_" + name + "_3x3_proj")
...@@ -88,16 +90,16 @@ class Inception(fluid.dygraph.Layer): ...@@ -88,16 +90,16 @@ class Inception(fluid.dygraph.Layer):
pool = self._pool(inputs) pool = self._pool(inputs)
convprj = self._convprj(pool) convprj = self._convprj(pool)
cat = fluid.layers.concat([conv1, conv3, conv5, convprj], axis=1) cat = paddle.concat([conv1, conv3, conv5, convprj], axis=1)
cat = fluid.layers.relu(cat) cat = F.relu(cat)
return cat return cat
class GoogleNetDY(fluid.dygraph.Layer): class GoogleNetDY(nn.Layer):
def __init__(self, class_dim=1000): def __init__(self, class_dim=1000):
super(GoogleNetDY, self).__init__() super(GoogleNetDY, self).__init__()
self._conv = ConvLayer(3, 64, 7, 2, name="conv1") self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2) self._pool = MaxPool2d(kernel_size=3, stride=2)
self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1") self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1")
self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3") self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3")
...@@ -122,42 +124,39 @@ class GoogleNetDY(fluid.dygraph.Layer): ...@@ -122,42 +124,39 @@ class GoogleNetDY(fluid.dygraph.Layer):
self._ince5b = Inception( self._ince5b = Inception(
832, 832, 384, 192, 384, 48, 128, 128, name="ince5b") 832, 832, 384, 192, 384, 48, 128, 128, name="ince5b")
self._pool_5 = Pool2D(pool_size=7, pool_type='avg', pool_stride=7) self._pool_5 = AvgPool2d(kernel_size=7, stride=7)
self._drop = fluid.dygraph.Dropout(p=0.4) self._drop = Dropout(p=0.4, mode="downscale_in_infer")
self._fc_out = Linear( self._fc_out = Linear(
1024, 1024,
class_dim, class_dim,
param_attr=xavier(1024, 1, "out"), weight_attr=xavier(1024, 1, "out"),
bias_attr=ParamAttr(name="out_offset"), bias_attr=ParamAttr(name="out_offset"))
act="softmax") self._pool_o1 = AvgPool2d(kernel_size=5, stride=3)
self._pool_o1 = Pool2D(pool_size=5, pool_stride=3, pool_type="avg")
self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1") self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
self._fc_o1 = Linear( self._fc_o1 = Linear(
1152, 1152,
1024, 1024,
param_attr=xavier(2048, 1, "fc_o1"), weight_attr=xavier(2048, 1, "fc_o1"),
bias_attr=ParamAttr(name="fc_o1_offset"), bias_attr=ParamAttr(name="fc_o1_offset"))
act="relu") self._drop_o1 = Dropout(p=0.7, mode="downscale_in_infer")
self._drop_o1 = fluid.dygraph.Dropout(p=0.7)
self._out1 = Linear( self._out1 = Linear(
1024, 1024,
class_dim, class_dim,
param_attr=xavier(1024, 1, "out1"), weight_attr=xavier(1024, 1, "out1"),
bias_attr=ParamAttr(name="out1_offset"), bias_attr=ParamAttr(name="out1_offset"))
act="softmax") self._pool_o2 = AvgPool2d(kernel_size=5, stride=3)
self._pool_o2 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg')
self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2") self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
self._fc_o2 = Linear( self._fc_o2 = Linear(
1152, 1152,
1024, 1024,
param_attr=xavier(2048, 1, "fc_o2"), weight_attr=xavier(2048, 1, "fc_o2"),
bias_attr=ParamAttr(name="fc_o2_offset")) bias_attr=ParamAttr(name="fc_o2_offset"))
self._drop_o2 = fluid.dygraph.Dropout(p=0.7) self._drop_o2 = Dropout(p=0.7, mode="downscale_in_infer")
self._out2 = Linear( self._out2 = Linear(
1024, 1024,
class_dim, class_dim,
param_attr=xavier(1024, 1, "out2"), weight_attr=xavier(1024, 1, "out2"),
bias_attr=ParamAttr(name="out2_offset")) bias_attr=ParamAttr(name="out2_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -183,19 +182,22 @@ class GoogleNetDY(fluid.dygraph.Layer): ...@@ -183,19 +182,22 @@ class GoogleNetDY(fluid.dygraph.Layer):
x = self._pool_5(ince5b) x = self._pool_5(ince5b)
x = self._drop(x) x = self._drop(x)
x = fluid.layers.squeeze(x, axes=[2, 3]) x = paddle.squeeze(x, axis=[2, 3])
out = self._fc_out(x) out = self._fc_out(x)
out = F.softmax(out)
x = self._pool_o1(ince4a) x = self._pool_o1(ince4a)
x = self._conv_o1(x) x = self._conv_o1(x)
x = fluid.layers.flatten(x) x = paddle.flatten(x, start_axis=1, stop_axis=-1)
x = self._fc_o1(x) x = self._fc_o1(x)
x = F.relu(x)
x = self._drop_o1(x) x = self._drop_o1(x)
out1 = self._out1(x) out1 = self._out1(x)
out1 = F.softmax(out1)
x = self._pool_o2(ince4d) x = self._pool_o2(ince4d)
x = self._conv_o2(x) x = self._conv_o2(x)
x = fluid.layers.flatten(x) x = paddle.flatten(x, start_axis=1, stop_axis=-1)
x = self._fc_o2(x) x = self._fc_o2(x)
x = self._drop_o2(x) x = self._drop_o2(x)
out2 = self._out2(x) out2 = self._out2(x)
......
...@@ -18,9 +18,12 @@ from __future__ import print_function ...@@ -18,9 +18,12 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -44,7 +47,7 @@ __all__ = [ ...@@ -44,7 +47,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -55,15 +58,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -55,15 +58,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
bn_name = name + '_bn' bn_name = name + '_bn'
self._batch_norm = BatchNorm( self._batch_norm = BatchNorm(
...@@ -80,7 +82,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -80,7 +82,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class Layer1(fluid.dygraph.Layer): class Layer1(nn.Layer):
def __init__(self, num_channels, has_se=False, name=None): def __init__(self, num_channels, has_se=False, name=None):
super(Layer1, self).__init__() super(Layer1, self).__init__()
...@@ -105,7 +107,7 @@ class Layer1(fluid.dygraph.Layer): ...@@ -105,7 +107,7 @@ class Layer1(fluid.dygraph.Layer):
return conv return conv
class TransitionLayer(fluid.dygraph.Layer): class TransitionLayer(nn.Layer):
def __init__(self, in_channels, out_channels, name=None): def __init__(self, in_channels, out_channels, name=None):
super(TransitionLayer, self).__init__() super(TransitionLayer, self).__init__()
...@@ -148,7 +150,7 @@ class TransitionLayer(fluid.dygraph.Layer): ...@@ -148,7 +150,7 @@ class TransitionLayer(fluid.dygraph.Layer):
return outs return outs
class Branches(fluid.dygraph.Layer): class Branches(nn.Layer):
def __init__(self, def __init__(self,
block_num, block_num,
in_channels, in_channels,
...@@ -183,7 +185,7 @@ class Branches(fluid.dygraph.Layer): ...@@ -183,7 +185,7 @@ class Branches(fluid.dygraph.Layer):
return outs return outs
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -243,11 +245,11 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -243,11 +245,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
if self.has_se: if self.has_se:
conv3 = self.se(conv3) conv3 = self.se(conv3)
y = fluid.layers.elementwise_add(x=conv3, y=residual, act="relu") y = paddle.elementwise_add(x=conv3, y=residual, act="relu")
return y return y
class BasicBlock(fluid.dygraph.Layer): class BasicBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -301,15 +303,15 @@ class BasicBlock(fluid.dygraph.Layer): ...@@ -301,15 +303,15 @@ class BasicBlock(fluid.dygraph.Layer):
if self.has_se: if self.has_se:
conv2 = self.se(conv2) conv2 = self.se(conv2)
y = fluid.layers.elementwise_add(x=conv2, y=residual, act="relu") y = paddle.elementwise_add(x=conv2, y=residual, act="relu")
return y return y
class SELayer(fluid.dygraph.Layer): class SELayer(nn.Layer):
def __init__(self, num_channels, num_filters, reduction_ratio, name=None): def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
super(SELayer, self).__init__() super(SELayer, self).__init__()
self.pool2d_gap = Pool2D(pool_type='avg', global_pooling=True) self.pool2d_gap = AdaptiveAvgPool2d(1)
self._num_channels = num_channels self._num_channels = num_channels
...@@ -320,8 +322,7 @@ class SELayer(fluid.dygraph.Layer): ...@@ -320,8 +322,7 @@ class SELayer(fluid.dygraph.Layer):
med_ch, med_ch,
act="relu", act="relu",
param_attr=ParamAttr( param_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"),
name=name + "_sqz_weights"),
bias_attr=ParamAttr(name=name + '_sqz_offset')) bias_attr=ParamAttr(name=name + '_sqz_offset'))
stdv = 1.0 / math.sqrt(med_ch * 1.0) stdv = 1.0 / math.sqrt(med_ch * 1.0)
...@@ -330,22 +331,21 @@ class SELayer(fluid.dygraph.Layer): ...@@ -330,22 +331,21 @@ class SELayer(fluid.dygraph.Layer):
num_filters, num_filters,
act="sigmoid", act="sigmoid",
param_attr=ParamAttr( param_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"),
name=name + "_exc_weights"),
bias_attr=ParamAttr(name=name + '_exc_offset')) bias_attr=ParamAttr(name=name + '_exc_offset'))
def forward(self, input): def forward(self, input):
pool = self.pool2d_gap(input) pool = self.pool2d_gap(input)
pool = fluid.layers.reshape(pool, shape=[-1, self._num_channels]) pool = paddle.reshape(pool, shape=[-1, self._num_channels])
squeeze = self.squeeze(pool) squeeze = self.squeeze(pool)
excitation = self.excitation(squeeze) excitation = self.excitation(squeeze)
excitation = fluid.layers.reshape( excitation = paddle.reshape(
excitation, shape=[-1, self._num_channels, 1, 1]) excitation, shape=[-1, self._num_channels, 1, 1])
out = input * excitation out = input * excitation
return out return out
class Stage(fluid.dygraph.Layer): class Stage(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_modules, num_modules,
...@@ -386,7 +386,7 @@ class Stage(fluid.dygraph.Layer): ...@@ -386,7 +386,7 @@ class Stage(fluid.dygraph.Layer):
return out return out
class HighResolutionModule(fluid.dygraph.Layer): class HighResolutionModule(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -414,7 +414,7 @@ class HighResolutionModule(fluid.dygraph.Layer): ...@@ -414,7 +414,7 @@ class HighResolutionModule(fluid.dygraph.Layer):
return out return out
class FuseLayers(fluid.dygraph.Layer): class FuseLayers(nn.Layer):
def __init__(self, def __init__(self,
in_channels, in_channels,
out_channels, out_channels,
...@@ -482,8 +482,8 @@ class FuseLayers(fluid.dygraph.Layer): ...@@ -482,8 +482,8 @@ class FuseLayers(fluid.dygraph.Layer):
y = self.residual_func_list[residual_func_idx](input[j]) y = self.residual_func_list[residual_func_idx](input[j])
residual_func_idx += 1 residual_func_idx += 1
y = fluid.layers.resize_nearest(input=y, scale=2**(j - i)) y = F.resize_nearest(input=y, scale=2**(j - i))
residual = fluid.layers.elementwise_add( residual = paddle.elementwise_add(
x=residual, y=y, act=None) x=residual, y=y, act=None)
elif j < i: elif j < i:
y = input[j] y = input[j]
...@@ -491,16 +491,16 @@ class FuseLayers(fluid.dygraph.Layer): ...@@ -491,16 +491,16 @@ class FuseLayers(fluid.dygraph.Layer):
y = self.residual_func_list[residual_func_idx](y) y = self.residual_func_list[residual_func_idx](y)
residual_func_idx += 1 residual_func_idx += 1
residual = fluid.layers.elementwise_add( residual = paddle.elementwise_add(
x=residual, y=y, act=None) x=residual, y=y, act=None)
residual = fluid.layers.relu(residual) residual = F.relu(residual)
outs.append(residual) outs.append(residual)
return outs return outs
class LastClsOut(fluid.dygraph.Layer): class LastClsOut(nn.Layer):
def __init__(self, def __init__(self,
num_channel_list, num_channel_list,
has_se, has_se,
...@@ -528,7 +528,7 @@ class LastClsOut(fluid.dygraph.Layer): ...@@ -528,7 +528,7 @@ class LastClsOut(fluid.dygraph.Layer):
return outs return outs
class HRNet(fluid.dygraph.Layer): class HRNet(nn.Layer):
def __init__(self, width=18, has_se=False, class_dim=1000): def __init__(self, width=18, has_se=False, class_dim=1000):
super(HRNet, self).__init__() super(HRNet, self).__init__()
...@@ -623,16 +623,15 @@ class HRNet(fluid.dygraph.Layer): ...@@ -623,16 +623,15 @@ class HRNet(fluid.dygraph.Layer):
stride=1, stride=1,
name="cls_head_last_conv") name="cls_head_last_conv")
self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) self.pool2d_avg = AdaptiveAvgPool2d(1)
stdv = 1.0 / math.sqrt(2048 * 1.0) stdv = 1.0 / math.sqrt(2048 * 1.0)
self.out = Linear( self.out = Linear(
2048, 2048,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_weights"),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, input): def forward(self, input):
...@@ -658,7 +657,7 @@ class HRNet(fluid.dygraph.Layer): ...@@ -658,7 +657,7 @@ class HRNet(fluid.dygraph.Layer):
y = self.conv_last(y) y = self.conv_last(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[0, -1]) y = paddle.reshape(y, shape=[0, -1])
y = self.out(y) y = self.out(y)
return y return y
......
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
__all__ = ["InceptionV4"] __all__ = ["InceptionV4"]
class ConvBNLayer(fluid.dygraph.Layer):
class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -18,15 +36,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -18,15 +36,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
bn_name = name + "_bn" bn_name = name + "_bn"
self._batch_norm = BatchNorm( self._batch_norm = BatchNorm(
...@@ -43,7 +60,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -43,7 +60,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class InceptionStem(fluid.dygraph.Layer): class InceptionStem(nn.Layer):
def __init__(self): def __init__(self):
super(InceptionStem, self).__init__() super(InceptionStem, self).__init__()
self._conv_1 = ConvBNLayer( self._conv_1 = ConvBNLayer(
...@@ -51,7 +68,7 @@ class InceptionStem(fluid.dygraph.Layer): ...@@ -51,7 +68,7 @@ class InceptionStem(fluid.dygraph.Layer):
self._conv_2 = ConvBNLayer(32, 32, 3, act="relu", name="conv2_3x3_s1") self._conv_2 = ConvBNLayer(32, 32, 3, act="relu", name="conv2_3x3_s1")
self._conv_3 = ConvBNLayer( self._conv_3 = ConvBNLayer(
32, 64, 3, padding=1, act="relu", name="conv3_3x3_s1") 32, 64, 3, padding=1, act="relu", name="conv3_3x3_s1")
self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2) self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)
self._conv2 = ConvBNLayer( self._conv2 = ConvBNLayer(
64, 96, 3, stride=2, act="relu", name="inception_stem1_3x3_s2") 64, 96, 3, stride=2, act="relu", name="inception_stem1_3x3_s2")
self._conv1_1 = ConvBNLayer( self._conv1_1 = ConvBNLayer(
...@@ -84,7 +101,7 @@ class InceptionStem(fluid.dygraph.Layer): ...@@ -84,7 +101,7 @@ class InceptionStem(fluid.dygraph.Layer):
pool1 = self._pool(conv) pool1 = self._pool(conv)
conv2 = self._conv2(conv) conv2 = self._conv2(conv)
concat = fluid.layers.concat([pool1, conv2], axis=1) concat = paddle.concat([pool1, conv2], axis=1)
conv1 = self._conv1_1(concat) conv1 = self._conv1_1(concat)
conv1 = self._conv1_2(conv1) conv1 = self._conv1_2(conv1)
...@@ -94,19 +111,19 @@ class InceptionStem(fluid.dygraph.Layer): ...@@ -94,19 +111,19 @@ class InceptionStem(fluid.dygraph.Layer):
conv2 = self._conv2_3(conv2) conv2 = self._conv2_3(conv2)
conv2 = self._conv2_4(conv2) conv2 = self._conv2_4(conv2)
concat = fluid.layers.concat([conv1, conv2], axis=1) concat = paddle.concat([conv1, conv2], axis=1)
conv1 = self._conv3(concat) conv1 = self._conv3(concat)
pool1 = self._pool(concat) pool1 = self._pool(concat)
concat = fluid.layers.concat([conv1, pool1], axis=1) concat = paddle.concat([conv1, pool1], axis=1)
return concat return concat
class InceptionA(fluid.dygraph.Layer): class InceptionA(nn.Layer):
def __init__(self, name): def __init__(self, name):
super(InceptionA, self).__init__() super(InceptionA, self).__init__()
self._pool = Pool2D(pool_size=3, pool_type="avg", pool_padding=1) self._pool = AvgPool2d(kernel_size=3, stride=1, padding=1)
self._conv1 = ConvBNLayer( self._conv1 = ConvBNLayer(
384, 96, 1, act="relu", name="inception_a" + name + "_1x1") 384, 96, 1, act="relu", name="inception_a" + name + "_1x1")
self._conv2 = ConvBNLayer( self._conv2 = ConvBNLayer(
...@@ -154,14 +171,14 @@ class InceptionA(fluid.dygraph.Layer): ...@@ -154,14 +171,14 @@ class InceptionA(fluid.dygraph.Layer):
conv4 = self._conv4_2(conv4) conv4 = self._conv4_2(conv4)
conv4 = self._conv4_3(conv4) conv4 = self._conv4_3(conv4)
concat = fluid.layers.concat([conv1, conv2, conv3, conv4], axis=1) concat = paddle.concat([conv1, conv2, conv3, conv4], axis=1)
return concat return concat
class ReductionA(fluid.dygraph.Layer): class ReductionA(nn.Layer):
def __init__(self): def __init__(self):
super(ReductionA, self).__init__() super(ReductionA, self).__init__()
self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2) self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)
self._conv2 = ConvBNLayer( self._conv2 = ConvBNLayer(
384, 384, 3, stride=2, act="relu", name="reduction_a_3x3") 384, 384, 3, stride=2, act="relu", name="reduction_a_3x3")
self._conv3_1 = ConvBNLayer( self._conv3_1 = ConvBNLayer(
...@@ -177,14 +194,14 @@ class ReductionA(fluid.dygraph.Layer): ...@@ -177,14 +194,14 @@ class ReductionA(fluid.dygraph.Layer):
conv3 = self._conv3_1(inputs) conv3 = self._conv3_1(inputs)
conv3 = self._conv3_2(conv3) conv3 = self._conv3_2(conv3)
conv3 = self._conv3_3(conv3) conv3 = self._conv3_3(conv3)
concat = fluid.layers.concat([pool1, conv2, conv3], axis=1) concat = paddle.concat([pool1, conv2, conv3], axis=1)
return concat return concat
class InceptionB(fluid.dygraph.Layer): class InceptionB(nn.Layer):
def __init__(self, name=None): def __init__(self, name=None):
super(InceptionB, self).__init__() super(InceptionB, self).__init__()
self._pool = Pool2D(pool_size=3, pool_type="avg", pool_padding=1) self._pool = AvgPool2d(kernel_size=3, stride=1, padding=1)
self._conv1 = ConvBNLayer( self._conv1 = ConvBNLayer(
1024, 128, 1, act="relu", name="inception_b" + name + "_1x1") 1024, 128, 1, act="relu", name="inception_b" + name + "_1x1")
self._conv2 = ConvBNLayer( self._conv2 = ConvBNLayer(
...@@ -254,14 +271,14 @@ class InceptionB(fluid.dygraph.Layer): ...@@ -254,14 +271,14 @@ class InceptionB(fluid.dygraph.Layer):
conv4 = self._conv4_4(conv4) conv4 = self._conv4_4(conv4)
conv4 = self._conv4_5(conv4) conv4 = self._conv4_5(conv4)
concat = fluid.layers.concat([conv1, conv2, conv3, conv4], axis=1) concat = paddle.concat([conv1, conv2, conv3, conv4], axis=1)
return concat return concat
class ReductionB(fluid.dygraph.Layer): class ReductionB(nn.Layer):
def __init__(self): def __init__(self):
super(ReductionB, self).__init__() super(ReductionB, self).__init__()
self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2) self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)
self._conv2_1 = ConvBNLayer( self._conv2_1 = ConvBNLayer(
1024, 192, 1, act="relu", name="reduction_b_3x3_reduce") 1024, 192, 1, act="relu", name="reduction_b_3x3_reduce")
self._conv2_2 = ConvBNLayer( self._conv2_2 = ConvBNLayer(
...@@ -294,15 +311,15 @@ class ReductionB(fluid.dygraph.Layer): ...@@ -294,15 +311,15 @@ class ReductionB(fluid.dygraph.Layer):
conv3 = self._conv3_3(conv3) conv3 = self._conv3_3(conv3)
conv3 = self._conv3_4(conv3) conv3 = self._conv3_4(conv3)
concat = fluid.layers.concat([pool1, conv2, conv3], axis=1) concat = paddle.concat([pool1, conv2, conv3], axis=1)
return concat return concat
class InceptionC(fluid.dygraph.Layer): class InceptionC(nn.Layer):
def __init__(self, name=None): def __init__(self, name=None):
super(InceptionC, self).__init__() super(InceptionC, self).__init__()
self._pool = Pool2D(pool_size=3, pool_type="avg", pool_padding=1) self._pool = AvgPool2d(kernel_size=3, stride=1, padding=1)
self._conv1 = ConvBNLayer( self._conv1 = ConvBNLayer(
1536, 256, 1, act="relu", name="inception_c" + name + "_1x1") 1536, 256, 1, act="relu", name="inception_c" + name + "_1x1")
self._conv2 = ConvBNLayer( self._conv2 = ConvBNLayer(
...@@ -364,13 +381,13 @@ class InceptionC(fluid.dygraph.Layer): ...@@ -364,13 +381,13 @@ class InceptionC(fluid.dygraph.Layer):
conv4_1 = self._conv4_1(conv4) conv4_1 = self._conv4_1(conv4)
conv4_2 = self._conv4_2(conv4) conv4_2 = self._conv4_2(conv4)
concat = fluid.layers.concat( concat = paddle.concat(
[conv1, conv2, conv3_1, conv3_2, conv4_1, conv4_2], axis=1) [conv1, conv2, conv3_1, conv3_2, conv4_1, conv4_2], axis=1)
return concat return concat
class InceptionV4DY(fluid.dygraph.Layer): class InceptionV4DY(nn.Layer):
def __init__(self, class_dim=1000): def __init__(self, class_dim=1000):
super(InceptionV4DY, self).__init__() super(InceptionV4DY, self).__init__()
self._inception_stem = InceptionStem() self._inception_stem = InceptionStem()
...@@ -394,15 +411,14 @@ class InceptionV4DY(fluid.dygraph.Layer): ...@@ -394,15 +411,14 @@ class InceptionV4DY(fluid.dygraph.Layer):
self._inceptionC_2 = InceptionC(name="2") self._inceptionC_2 = InceptionC(name="2")
self._inceptionC_3 = InceptionC(name="3") self._inceptionC_3 = InceptionC(name="3")
self.avg_pool = Pool2D(pool_type='avg', global_pooling=True) self.avg_pool = AdaptiveAvgPool2d(1)
self._drop = Dropout(p=0.2) self._drop = Dropout(p=0.2, mode="downscale_in_infer")
stdv = 1.0 / math.sqrt(1536 * 1.0) stdv = 1.0 / math.sqrt(1536 * 1.0)
self.out = Linear( self.out = Linear(
1536, 1536,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="final_fc_weights"),
name="final_fc_weights"),
bias_attr=ParamAttr(name="final_fc_offset")) bias_attr=ParamAttr(name="final_fc_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -428,7 +444,7 @@ class InceptionV4DY(fluid.dygraph.Layer): ...@@ -428,7 +444,7 @@ class InceptionV4DY(fluid.dygraph.Layer):
x = self._inceptionC_3(x) x = self._inceptionC_3(x)
x = self.avg_pool(x) x = self.avg_pool(x)
x = fluid.layers.squeeze(x, axes=[2, 3]) x = paddle.squeeze(x, axis=[2, 3])
x = self._drop(x) x = self._drop(x)
x = self.out(x) x = self.out(x)
return x return x
......
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import warnings
import paddle.fluid as fluid
def initial_type(name,
                 input,
                 op_type,
                 fan_out,
                 init="google",
                 use_bias=False,
                 filter_size=0,
                 stddev=0.02):
    """Build the (param_attr, bias_attr) pair for a conv/deconv/fc layer.

    Args:
        name (str): parameter-name prefix; weights are named ``name + "_weights"``
            and the bias ``name + "_offset"``.
        input: input variable; its shape is read to derive fan-in for
            "kaiming" initialization.
        op_type (str): 'conv', 'deconv', or anything else for a fully
            connected layer (affects fan-in computation only).
        fan_out (int): number of output channels/units.
        init (str): 'kaiming' (uniform in +-1/sqrt(fan_in)),
            'google' (normal with scale sqrt(2/n), zero bias), or any other
            value for a plain normal with ``stddev`` and zero bias.
        use_bias (bool): when False, bias_attr is ``False`` (no bias).
        filter_size (int): kernel size; used by 'kaiming' and 'google'.
            NOTE(review): with the default filter_size=0 the 'google' branch
            divides by zero — callers are expected to pass a real kernel size.
        stddev (float): std of the fallback normal initializer.

    Returns:
        tuple: (param_attr, bias_attr) where bias_attr is ``False`` when
        ``use_bias`` is falsy.
    """
    if init == "kaiming":
        if op_type == 'conv':
            fan_in = input.shape[1] * filter_size * filter_size
        elif op_type == 'deconv':
            fan_in = fan_out * filter_size * filter_size
        else:
            # Fully connected: flatten all trailing dims into fan-in.
            if len(input.shape) > 2:
                fan_in = input.shape[1] * input.shape[2] * input.shape[3]
            else:
                fan_in = input.shape[1]
        bound = 1 / math.sqrt(fan_in)
        weight_init = fluid.initializer.Uniform(low=-bound, high=bound)
        bias_init = fluid.initializer.Uniform(low=-bound, high=bound)
    elif init == 'google':
        n = filter_size * filter_size * fan_out
        weight_init = fluid.initializer.NormalInitializer(
            loc=0.0, scale=math.sqrt(2.0 / n))
        bias_init = fluid.initializer.Constant(0.0)
    else:
        weight_init = fluid.initializer.NormalInitializer(
            loc=0.0, scale=stddev)
        bias_init = fluid.initializer.Constant(0.0)

    param_attr = fluid.ParamAttr(
        name=name + "_weights", initializer=weight_init)
    if use_bias:
        bias_attr = fluid.ParamAttr(
            name=name + '_offset', initializer=bias_init)
    else:
        bias_attr = False
    return param_attr, bias_attr
def cal_padding(img_size, stride, filter_size, dilation=1):
    """Return SAME-style padding split into a (begin, end) pair.

    The total padding keeps the output size at ceil(img_size / stride);
    when it is odd, the extra pixel goes on the end side.
    """
    remainder = img_size % stride
    if remainder:
        total = max(filter_size - remainder, 0)
    else:
        total = max(filter_size - stride, 0)
    begin = total // 2
    return begin, total - begin
def init_batch_norm_layer(name="batch_norm"):
    """Return (scale, offset) ParamAttrs for a batch-norm layer.

    Scale starts at 1.0 and offset at 0.0; names are derived from ``name``.
    """
    scale_attr = fluid.ParamAttr(
        name=name + '_scale',
        initializer=fluid.initializer.Constant(value=1.0))
    offset_attr = fluid.ParamAttr(
        name=name + '_offset',
        initializer=fluid.initializer.Constant(value=0.0))
    return scale_attr, offset_attr
def init_fc_layer(fout, name='fc'):
    """Return (weight, bias) ParamAttrs for a fully connected layer.

    Weights are uniform in +-1/sqrt(fout); the bias starts at zero.
    """
    bound = 1.0 / math.sqrt(fout)
    weight_attr = fluid.ParamAttr(
        name=name + '_weights',
        initializer=fluid.initializer.UniformInitializer(
            low=-bound, high=bound))
    bias_attr = fluid.ParamAttr(
        name=name + '_offset',
        initializer=fluid.initializer.Constant(value=0.0))
    return weight_attr, bias_attr
def norm_layer(input, norm_type='batch_norm', name=None):
    """Apply a normalization layer to ``input``.

    Args:
        input: 4-D feature map (NCHW).
        norm_type (str): 'batch_norm' or 'instance_norm'.
        name (str|None): parameter-name prefix. For 'batch_norm' a prefix is
            required (it is concatenated unconditionally); for
            'instance_norm' it may be None, in which case parameters are
            auto-named.

    Returns:
        The normalized tensor.

    Raises:
        NotImplementedError: for an unsupported ``norm_type``.
    """
    if norm_type == 'batch_norm':
        param_attr = fluid.ParamAttr(
            name=name + '_weights',
            initializer=fluid.initializer.Constant(1.0))
        bias_attr = fluid.ParamAttr(
            name=name + '_offset',
            initializer=fluid.initializer.Constant(value=0.0))
        return fluid.layers.batch_norm(
            input,
            param_attr=param_attr,
            bias_attr=bias_attr,
            moving_mean_name=name + '_mean',
            moving_variance_name=name + '_variance')
    elif norm_type == 'instance_norm':
        # Hand-rolled instance norm: per-sample, per-channel statistics
        # over the spatial dims (H, W).
        helper = fluid.layer_helper.LayerHelper("instance_norm", **locals())
        dtype = helper.input_dtype()
        epsilon = 1e-5
        mean = fluid.layers.reduce_mean(input, dim=[2, 3], keep_dim=True)
        var = fluid.layers.reduce_mean(
            fluid.layers.square(input - mean), dim=[2, 3], keep_dim=True)
        # Fix: the original referenced scale_name/offset_name unconditionally,
        # raising NameError when name is None; fall back to auto-naming.
        scale_name = name + "_scale" if name is not None else None
        offset_name = name + "_offset" if name is not None else None
        scale_param = fluid.ParamAttr(
            name=scale_name,
            initializer=fluid.initializer.Constant(1.0),
            trainable=True)
        offset_param = fluid.ParamAttr(
            name=offset_name,
            initializer=fluid.initializer.Constant(0.0),
            trainable=True)
        scale = helper.create_parameter(
            attr=scale_param, shape=input.shape[1:2], dtype=dtype)
        offset = helper.create_parameter(
            attr=offset_param, shape=input.shape[1:2], dtype=dtype)
        tmp = fluid.layers.elementwise_mul(x=(input - mean), y=scale, axis=1)
        tmp = tmp / fluid.layers.sqrt(var + epsilon)
        tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
        return tmp
    else:
        # Fixed typo in the original message ("norm tyoe ... is not support").
        raise NotImplementedError(
            "norm type: [%s] is not supported" % norm_type)
def conv2d(input,
           num_filters=64,
           filter_size=7,
           stride=1,
           stddev=0.02,
           padding=0,
           groups=None,
           name="conv2d",
           norm=None,
           act=None,
           relufactor=0.0,
           use_bias=False,
           padding_type=None,
           initial="normal",
           use_cudnn=True):
    """2-D convolution with optional padding mode, normalization and activation.

    Args:
        input: 4-D input tensor (NCHW).
        num_filters (int): output channels.
        filter_size (int): square kernel size.
        stride (int): convolution stride.
        stddev (float): std for the 'normal' weight initializer.
        padding (int): explicit padding; ignored when ``padding_type`` is set.
        groups (int|None): grouped-convolution group count.
        name (str): parameter/layer name prefix.
        norm (str|None): 'batch_norm'/'instance_norm' applied after the conv.
        act (str|None): one of 'relu', 'leaky_relu', 'tanh', 'sigmoid',
            'swish', or None.
        relufactor (float): negative slope for 'leaky_relu'.
        use_bias (bool): whether the conv has a bias parameter.
        padding_type (str|None): 'SAME', 'VALID', or 'DYNAMIC'; overrides
            ``padding``.
        initial (str): weight-init scheme passed to ``initial_type``.
        use_cudnn (bool): forwarded to fluid.layers.conv2d.

    Returns:
        The output tensor after conv (+ norm) (+ activation).

    Raises:
        NotImplementedError: for an unsupported ``act``.
    """
    if padding != 0 and padding_type is not None:
        warnings.warn(
            'padding value and padding type are set in the same time, and the final padding width and padding height are computed by padding_type'
        )

    param_attr, bias_attr = initial_type(
        name=name,
        input=input,
        op_type='conv',
        fan_out=num_filters,
        init=initial,
        use_bias=use_bias,
        filter_size=filter_size,
        stddev=stddev)

    def get_padding(filter_size, stride=1, dilation=1):
        # "DYNAMIC": padding that preserves spatial size at stride 1.
        return ((stride - 1) + dilation * (filter_size - 1)) // 2

    need_crop = False
    if padding_type == "SAME":
        top_padding, bottom_padding = cal_padding(input.shape[2], stride,
                                                  filter_size)
        # NOTE(review): width padding is also computed from input.shape[2]
        # (the height); for non-square inputs this looks wrong — confirm
        # whether input.shape[3] was intended. Behavior kept as-is.
        left_padding, right_padding = cal_padding(input.shape[2], stride,
                                                 filter_size)
        height_padding = bottom_padding
        width_padding = right_padding
        if top_padding != bottom_padding or left_padding != right_padding:
            # Asymmetric SAME padding: over-pad symmetrically, then crop the
            # first row/column of the result below.
            height_padding = top_padding + stride
            width_padding = left_padding + stride
            need_crop = True
        padding = [height_padding, width_padding]
    elif padding_type == "VALID":
        padding = [0, 0]
    elif padding_type == "DYNAMIC":
        padding = get_padding(filter_size, stride)
    # else: keep the explicit ``padding`` argument unchanged.

    conv = fluid.layers.conv2d(
        input,
        num_filters,
        filter_size,
        groups=groups,
        name=name,
        stride=stride,
        padding=padding,
        use_cudnn=use_cudnn,
        param_attr=param_attr,
        bias_attr=bias_attr)
    if need_crop:
        conv = conv[:, :, 1:, 1:]

    if norm is not None:
        conv = norm_layer(input=conv, norm_type=norm, name=name + "_norm")

    if act == 'relu':
        conv = fluid.layers.relu(conv, name=name + '_relu')
    elif act == 'leaky_relu':
        conv = fluid.layers.leaky_relu(
            conv, alpha=relufactor, name=name + '_leaky_relu')
    elif act == 'tanh':
        conv = fluid.layers.tanh(conv, name=name + '_tanh')
    elif act == 'sigmoid':
        conv = fluid.layers.sigmoid(conv, name=name + '_sigmoid')
    elif act == 'swish':
        conv = fluid.layers.swish(conv, name=name + '_swish')
    elif act is None:
        pass
    else:
        raise NotImplementedError("activation: [%s] is not support" % act)
    return conv
...@@ -18,10 +18,12 @@ from __future__ import print_function ...@@ -18,10 +18,12 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.fluid.initializer import MSRA from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import MSRA
import math import math
__all__ = [ __all__ = [
...@@ -29,7 +31,7 @@ __all__ = [ ...@@ -29,7 +31,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
filter_size, filter_size,
...@@ -39,20 +41,17 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -39,20 +41,17 @@ class ConvBNLayer(fluid.dygraph.Layer):
channels=None, channels=None,
num_groups=1, num_groups=1,
act='relu', act='relu',
use_cudnn=True,
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
groups=num_groups, groups=num_groups,
act=None, weight_attr=ParamAttr(
use_cudnn=use_cudnn,
param_attr=ParamAttr(
initializer=MSRA(), name=name + "_weights"), initializer=MSRA(), name=name + "_weights"),
bias_attr=False) bias_attr=False)
...@@ -70,7 +69,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -70,7 +69,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class DepthwiseSeparable(fluid.dygraph.Layer): class DepthwiseSeparable(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters1, num_filters1,
...@@ -88,7 +87,6 @@ class DepthwiseSeparable(fluid.dygraph.Layer): ...@@ -88,7 +87,6 @@ class DepthwiseSeparable(fluid.dygraph.Layer):
stride=stride, stride=stride,
padding=1, padding=1,
num_groups=int(num_groups * scale), num_groups=int(num_groups * scale),
use_cudnn=False,
name=name + "_dw") name=name + "_dw")
self._pointwise_conv = ConvBNLayer( self._pointwise_conv = ConvBNLayer(
...@@ -105,7 +103,7 @@ class DepthwiseSeparable(fluid.dygraph.Layer): ...@@ -105,7 +103,7 @@ class DepthwiseSeparable(fluid.dygraph.Layer):
return y return y
class MobileNet(fluid.dygraph.Layer): class MobileNet(nn.Layer):
def __init__(self, scale=1.0, class_dim=1000): def __init__(self, scale=1.0, class_dim=1000):
super(MobileNet, self).__init__() super(MobileNet, self).__init__()
self.scale = scale self.scale = scale
...@@ -229,12 +227,12 @@ class MobileNet(fluid.dygraph.Layer): ...@@ -229,12 +227,12 @@ class MobileNet(fluid.dygraph.Layer):
name="conv6")) name="conv6"))
self.block_list.append(conv6) self.block_list.append(conv6)
self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) self.pool2d_avg = AdaptiveAvgPool2d(1)
self.out = Linear( self.out = Linear(
int(1024 * scale), int(1024 * scale),
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=MSRA(), name="fc7_weights"), initializer=MSRA(), name="fc7_weights"),
bias_attr=ParamAttr(name="fc7_offset")) bias_attr=ParamAttr(name="fc7_offset"))
...@@ -243,7 +241,7 @@ class MobileNet(fluid.dygraph.Layer): ...@@ -243,7 +241,7 @@ class MobileNet(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, int(1024 * self.scale)]) y = paddle.reshape(y, shape=[-1, int(1024 * self.scale)])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -18,9 +18,11 @@ from __future__ import print_function ...@@ -18,9 +18,11 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
import math import math
...@@ -30,7 +32,7 @@ __all__ = [ ...@@ -30,7 +32,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
filter_size, filter_size,
...@@ -43,16 +45,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -43,16 +45,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
use_cudnn=True): use_cudnn=True):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
groups=num_groups, groups=num_groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
use_cudnn=use_cudnn,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
self._batch_norm = BatchNorm( self._batch_norm = BatchNorm(
...@@ -66,11 +66,11 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -66,11 +66,11 @@ class ConvBNLayer(fluid.dygraph.Layer):
y = self._conv(inputs) y = self._conv(inputs)
y = self._batch_norm(y) y = self._batch_norm(y)
if if_act: if if_act:
y = fluid.layers.relu6(y) y = F.relu6(y)
return y return y
class InvertedResidualUnit(fluid.dygraph.Layer): class InvertedResidualUnit(nn.Layer):
def __init__(self, num_channels, num_in_filter, num_filters, stride, def __init__(self, num_channels, num_in_filter, num_filters, stride,
filter_size, padding, expansion_factor, name): filter_size, padding, expansion_factor, name):
super(InvertedResidualUnit, self).__init__() super(InvertedResidualUnit, self).__init__()
...@@ -108,11 +108,11 @@ class InvertedResidualUnit(fluid.dygraph.Layer): ...@@ -108,11 +108,11 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
y = self._bottleneck_conv(y, if_act=True) y = self._bottleneck_conv(y, if_act=True)
y = self._linear_conv(y, if_act=False) y = self._linear_conv(y, if_act=False)
if ifshortcut: if ifshortcut:
y = fluid.layers.elementwise_add(inputs, y) y = paddle.elementwise_add(inputs, y)
return y return y
class InvresiBlocks(fluid.dygraph.Layer): class InvresiBlocks(nn.Layer):
def __init__(self, in_c, t, c, n, s, name): def __init__(self, in_c, t, c, n, s, name):
super(InvresiBlocks, self).__init__() super(InvresiBlocks, self).__init__()
...@@ -148,7 +148,7 @@ class InvresiBlocks(fluid.dygraph.Layer): ...@@ -148,7 +148,7 @@ class InvresiBlocks(fluid.dygraph.Layer):
return y return y
class MobileNet(fluid.dygraph.Layer): class MobileNet(nn.Layer):
def __init__(self, class_dim=1000, scale=1.0): def __init__(self, class_dim=1000, scale=1.0):
super(MobileNet, self).__init__() super(MobileNet, self).__init__()
self.scale = scale self.scale = scale
...@@ -199,12 +199,12 @@ class MobileNet(fluid.dygraph.Layer): ...@@ -199,12 +199,12 @@ class MobileNet(fluid.dygraph.Layer):
padding=0, padding=0,
name="conv9") name="conv9")
self.pool2d_avg = Pool2D(pool_type="avg", global_pooling=True) self.pool2d_avg = AdaptiveAvgPool2d(1)
self.out = Linear( self.out = Linear(
self.out_c, self.out_c,
class_dim, class_dim,
param_attr=ParamAttr(name="fc10_weights"), weight_attr=ParamAttr(name="fc10_weights"),
bias_attr=ParamAttr(name="fc10_offset")) bias_attr=ParamAttr(name="fc10_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -213,7 +213,7 @@ class MobileNet(fluid.dygraph.Layer): ...@@ -213,7 +213,7 @@ class MobileNet(fluid.dygraph.Layer):
y = block(y) y = block(y)
y = self.conv9(y, if_act=True) y = self.conv9(y, if_act=True)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.out_c]) y = paddle.reshape(y, shape=[-1, self.out_c])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -18,9 +18,12 @@ from __future__ import print_function ...@@ -18,9 +18,12 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.regularizer import L2Decay
import math import math
...@@ -42,8 +45,12 @@ def make_divisible(v, divisor=8, min_value=None): ...@@ -42,8 +45,12 @@ def make_divisible(v, divisor=8, min_value=None):
return new_v return new_v
class MobileNetV3(fluid.dygraph.Layer): class MobileNetV3(nn.Layer):
def __init__(self, scale=1.0, model_name="small", class_dim=1000): def __init__(self,
scale=1.0,
model_name="small",
dropout_prob=0.2,
class_dim=1000):
super(MobileNetV3, self).__init__() super(MobileNetV3, self).__init__()
inplanes = 16 inplanes = 16
...@@ -130,41 +137,42 @@ class MobileNetV3(fluid.dygraph.Layer): ...@@ -130,41 +137,42 @@ class MobileNetV3(fluid.dygraph.Layer):
act="hard_swish", act="hard_swish",
name="conv_last") name="conv_last")
self.pool = Pool2D( self.pool = AdaptiveAvgPool2d(1)
pool_type="avg", global_pooling=True, use_cudnn=False)
self.last_conv = Conv2D( self.last_conv = Conv2d(
num_channels=make_divisible(scale * self.cls_ch_squeeze), in_channels=make_divisible(scale * self.cls_ch_squeeze),
num_filters=self.cls_ch_expand, out_channels=self.cls_ch_expand,
filter_size=1, kernel_size=1,
stride=1, stride=1,
padding=0, padding=0,
act=None, weight_attr=ParamAttr(name="last_1x1_conv_weights"),
param_attr=ParamAttr(name="last_1x1_conv_weights"),
bias_attr=False) bias_attr=False)
self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
self.out = Linear( self.out = Linear(
input_dim=self.cls_ch_expand, self.cls_ch_expand,
output_dim=class_dim, class_dim,
param_attr=ParamAttr("fc_weights"), weight_attr=ParamAttr("fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs, label=None, dropout_prob=0.2): def forward(self, inputs, label=None):
x = self.conv1(inputs) x = self.conv1(inputs)
for block in self.block_list: for block in self.block_list:
x = block(x) x = block(x)
x = self.last_second_conv(x) x = self.last_second_conv(x)
x = self.pool(x) x = self.pool(x)
x = self.last_conv(x) x = self.last_conv(x)
x = fluid.layers.hard_swish(x) x = F.hard_swish(x)
x = fluid.layers.dropout(x=x, dropout_prob=dropout_prob) x = self.dropout(x)
x = fluid.layers.reshape(x, shape=[x.shape[0], x.shape[1]]) x = paddle.reshape(x, shape=[x.shape[0], x.shape[1]])
x = self.out(x) x = self.out(x)
return x return x
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
in_c, in_c,
out_c, out_c,
...@@ -179,28 +187,22 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -179,28 +187,22 @@ class ConvBNLayer(fluid.dygraph.Layer):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self.if_act = if_act self.if_act = if_act
self.act = act self.act = act
self.conv = fluid.dygraph.Conv2D( self.conv = Conv2d(
num_channels=in_c, in_channels=in_c,
num_filters=out_c, out_channels=out_c,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
groups=num_groups, groups=num_groups,
param_attr=ParamAttr(name=name + "_weights"), weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False, bias_attr=False)
use_cudnn=use_cudnn, self.bn = BatchNorm(
act=None)
self.bn = fluid.dygraph.BatchNorm(
num_channels=out_c, num_channels=out_c,
act=None, act=None,
param_attr=ParamAttr( param_attr=ParamAttr(
name=name + "_bn_scale", name=name + "_bn_scale", regularizer=L2Decay(0.0)),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=0.0)),
bias_attr=ParamAttr( bias_attr=ParamAttr(
name=name + "_bn_offset", name=name + "_bn_offset", regularizer=L2Decay(0.0)),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=0.0)),
moving_mean_name=name + "_bn_mean", moving_mean_name=name + "_bn_mean",
moving_variance_name=name + "_bn_variance") moving_variance_name=name + "_bn_variance")
...@@ -209,16 +211,16 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -209,16 +211,16 @@ class ConvBNLayer(fluid.dygraph.Layer):
x = self.bn(x) x = self.bn(x)
if self.if_act: if self.if_act:
if self.act == "relu": if self.act == "relu":
x = fluid.layers.relu(x) x = F.relu(x)
elif self.act == "hard_swish": elif self.act == "hard_swish":
x = fluid.layers.hard_swish(x) x = F.hard_swish(x)
else: else:
print("The activation function is selected incorrectly.") print("The activation function is selected incorrectly.")
exit() exit()
return x return x
class ResidualUnit(fluid.dygraph.Layer): class ResidualUnit(nn.Layer):
def __init__(self, def __init__(self,
in_c, in_c,
mid_c, mid_c,
...@@ -270,40 +272,38 @@ class ResidualUnit(fluid.dygraph.Layer): ...@@ -270,40 +272,38 @@ class ResidualUnit(fluid.dygraph.Layer):
x = self.mid_se(x) x = self.mid_se(x)
x = self.linear_conv(x) x = self.linear_conv(x)
if self.if_shortcut: if self.if_shortcut:
x = fluid.layers.elementwise_add(inputs, x) x = paddle.elementwise_add(inputs, x)
return x return x
class SEModule(fluid.dygraph.Layer): class SEModule(nn.Layer):
def __init__(self, channel, reduction=4, name=""): def __init__(self, channel, reduction=4, name=""):
super(SEModule, self).__init__() super(SEModule, self).__init__()
self.avg_pool = fluid.dygraph.Pool2D( self.avg_pool = AdaptiveAvgPool2d(1)
pool_type="avg", global_pooling=True, use_cudnn=False) self.conv1 = Conv2d(
self.conv1 = fluid.dygraph.Conv2D( in_channels=channel,
num_channels=channel, out_channels=channel // reduction,
num_filters=channel // reduction, kernel_size=1,
filter_size=1,
stride=1, stride=1,
padding=0, padding=0,
act="relu", weight_attr=ParamAttr(name=name + "_1_weights"),
param_attr=ParamAttr(name=name + "_1_weights"),
bias_attr=ParamAttr(name=name + "_1_offset")) bias_attr=ParamAttr(name=name + "_1_offset"))
self.conv2 = fluid.dygraph.Conv2D( self.conv2 = Conv2d(
num_channels=channel // reduction, in_channels=channel // reduction,
num_filters=channel, out_channels=channel,
filter_size=1, kernel_size=1,
stride=1, stride=1,
padding=0, padding=0,
act=None, weight_attr=ParamAttr(name + "_2_weights"),
param_attr=ParamAttr(name + "_2_weights"),
bias_attr=ParamAttr(name=name + "_2_offset")) bias_attr=ParamAttr(name=name + "_2_offset"))
def forward(self, inputs): def forward(self, inputs):
outputs = self.avg_pool(inputs) outputs = self.avg_pool(inputs)
outputs = self.conv1(outputs) outputs = self.conv1(outputs)
outputs = F.relu(outputs)
outputs = self.conv2(outputs) outputs = self.conv2(outputs)
outputs = fluid.layers.hard_sigmoid(outputs) outputs = F.hard_sigmoid(outputs)
return fluid.layers.elementwise_mul(x=inputs, y=outputs, axis=0) return paddle.multiply(x=inputs, y=outputs, axis=0)
def MobileNetV3_small_x0_35(**args): def MobileNetV3_small_x0_35(**args):
......
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import contextlib
# Regularizer shared by all batch-norm scale/offset parameters; a coefficient
# of 0.0 disables L2 weight decay on them.
bn_regularizer = fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.0)
# Mutable global prefix prepended to every parameter name; pushed/popped by
# the `scope` context manager below.
name_scope = ""
@contextlib.contextmanager
def scope(name):
    """Temporarily append ``name + '/'`` to the global ``name_scope`` prefix.

    Used to build hierarchical parameter names; scopes nest naturally.
    The previous prefix is restored on exit, including when the body raises
    (the original version left ``name_scope`` corrupted on exception).
    """
    global name_scope
    saved = name_scope
    name_scope = name_scope + name + '/'
    try:
        yield
    finally:
        name_scope = saved
def max_pool(input, kernel, stride, padding):
    """Max-pooling wrapper around fluid.layers.pool2d."""
    return fluid.layers.pool2d(
        input,
        pool_size=kernel,
        pool_type='max',
        pool_stride=stride,
        pool_padding=padding)
def group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None):
    """Group normalization over an NCHW input.

    If ``G`` does not evenly divide the channel count C, the group count is
    nudged to the nearest value (searching +-d for d in 0..9) that does.

    Args:
        input: 4-D tensor (NCHW); unpacking asserts the rank.
        G (int): requested number of groups.
        eps (float): numerical-stability epsilon. Fix: the original accepted
            this argument but never forwarded it to fluid.layers.group_norm.
        param_attr / bias_attr: ParamAttrs for scale/offset.

    Returns:
        The group-normalized tensor.
    """
    N, C, H, W = input.shape
    if C % G != 0:
        # Search outward from G for the closest positive divisor of C.
        for d in range(10):
            for t in (d, -d):
                candidate = G + t
                if candidate <= 0:
                    continue
                if C % candidate == 0:
                    G = candidate
                    break
            if C % G == 0:
                break
    assert C % G == 0
    x = fluid.layers.group_norm(
        input,
        groups=G,
        epsilon=eps,
        param_attr=param_attr,
        bias_attr=bias_attr,
        name=name_scope + 'group_norm')
    return x
def bn(*args, **kwargs):
    """Batch-norm wrapper that names its parameters under the current scope.

    Opens a 'BatchNorm' sub-scope, wires gamma/beta ParamAttrs with the
    shared zero-decay regularizer, and forwards everything else to
    fluid.layers.batch_norm.
    """
    with scope('BatchNorm'):
        gamma_attr = fluid.ParamAttr(
            name=name_scope + 'gamma', regularizer=bn_regularizer)
        beta_attr = fluid.ParamAttr(
            name=name_scope + 'beta', regularizer=bn_regularizer)
        return fluid.layers.batch_norm(
            *args,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=gamma_attr,
            bias_attr=beta_attr,
            moving_mean_name=name_scope + 'moving_mean',
            moving_variance_name=name_scope + 'moving_variance',
            **kwargs)
def bn_relu(data):
    """Batch normalization followed by a ReLU activation."""
    normalized = bn(data)
    return fluid.layers.relu(normalized)
def relu(data):
    """ReLU activation wrapper around fluid.layers.relu."""
    return fluid.layers.relu(data)
def conv(*args, **kwargs):
    """conv2d wrapper that names weights/biases under the current scope.

    The weight attr is always replaced by the scoped name; a truthy
    ``bias_attr`` keyword is turned into a scoped, zero-initialized
    ParamAttr, otherwise the bias is disabled.
    """
    kwargs['param_attr'] = name_scope + 'weights'
    if kwargs.get('bias_attr'):
        kwargs['bias_attr'] = fluid.ParamAttr(
            name=name_scope + 'biases',
            regularizer=None,
            initializer=fluid.initializer.ConstantInitializer(value=0.0))
    else:
        kwargs['bias_attr'] = False
    return fluid.layers.conv2d(*args, **kwargs)
def deconv(*args, **kwargs):
    """conv2d_transpose wrapper that names parameters under the current scope.

    Mirrors ``conv``: the weight attr is the scoped name, and a truthy
    ``bias_attr`` keyword becomes the scoped bias name; otherwise no bias.
    """
    kwargs['param_attr'] = name_scope + 'weights'
    if kwargs.get('bias_attr'):
        kwargs['bias_attr'] = name_scope + 'biases'
    else:
        kwargs['bias_attr'] = False
    return fluid.layers.conv2d_transpose(*args, **kwargs)
def seperate_conv(input, channel, stride, filter, dilation=1, act=None):
    """Depthwise-separable convolution block.

    A depthwise conv (groups == in-channels) followed by BN (+ optional
    activation), then a 1x1 pointwise conv to ``channel`` outputs, again
    with BN (+ optional activation). ``act``, when given, is a callable
    applied after each BN.
    """
    depthwise_attr = fluid.ParamAttr(
        name=name_scope + 'weights',
        regularizer=fluid.regularizer.L2DecayRegularizer(
            regularization_coeff=0.0),
        initializer=fluid.initializer.TruncatedNormal(
            loc=0.0, scale=0.33))
    with scope('depthwise'):
        out = conv(
            input,
            input.shape[1],
            filter,
            stride,
            groups=input.shape[1],
            padding=(filter // 2) * dilation,
            dilation=dilation,
            use_cudnn=False,
            param_attr=depthwise_attr)
        out = bn(out)
        if act:
            out = act(out)

    pointwise_attr = fluid.ParamAttr(
        name=name_scope + 'weights',
        regularizer=None,
        initializer=fluid.initializer.TruncatedNormal(
            loc=0.0, scale=0.06))
    with scope('pointwise'):
        out = conv(
            out, channel, 1, 1, groups=1, padding=0,
            param_attr=pointwise_attr)
        out = bn(out)
        if act:
            out = act(out)
    return out
...@@ -18,9 +18,12 @@ from __future__ import print_function ...@@ -18,9 +18,12 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -31,7 +34,7 @@ __all__ = [ ...@@ -31,7 +34,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__( def __init__(
self, self,
num_channels, num_channels,
...@@ -43,15 +46,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -43,15 +46,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None, ): name=None, ):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -71,7 +73,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -71,7 +73,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels1, num_channels1,
num_channels2, num_channels2,
...@@ -102,8 +104,7 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -102,8 +104,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
act='relu', act='relu',
name=name + '_branch2b_' + str(s + 1))) name=name + '_branch2b_' + str(s + 1)))
self.conv1_list.append(conv1) self.conv1_list.append(conv1)
self.pool2d_avg = Pool2D( self.pool2d_avg = AvgPool2d(kernel_size=3, stride=stride, padding=1)
pool_size=3, pool_stride=stride, pool_padding=1, pool_type='avg')
self.conv2 = ConvBNLayer( self.conv2 = ConvBNLayer(
num_channels=num_filters, num_channels=num_filters,
...@@ -124,7 +125,7 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -124,7 +125,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
y = self.conv0(inputs) y = self.conv0(inputs)
xs = fluid.layers.split(y, self.scales, 1) xs = paddle.split(y, self.scales, 1)
ys = [] ys = []
for s, conv1 in enumerate(self.conv1_list): for s, conv1 in enumerate(self.conv1_list):
if s == 0 or self.stride == 2: if s == 0 or self.stride == 2:
...@@ -135,18 +136,18 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -135,18 +136,18 @@ class BottleneckBlock(fluid.dygraph.Layer):
ys.append(xs[-1]) ys.append(xs[-1])
else: else:
ys.append(self.pool2d_avg(xs[-1])) ys.append(self.pool2d_avg(xs[-1]))
conv1 = fluid.layers.concat(ys, axis=1) conv1 = paddle.concat(ys, axis=1)
conv2 = self.conv2(conv1) conv2 = self.conv2(conv1)
if self.shortcut: if self.shortcut:
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu') y = paddle.elementwise_add(x=short, y=conv2, act='relu')
return y return y
class Res2Net(fluid.dygraph.Layer): class Res2Net(nn.Layer):
def __init__(self, layers=50, scales=4, width=26, class_dim=1000): def __init__(self, layers=50, scales=4, width=26, class_dim=1000):
super(Res2Net, self).__init__() super(Res2Net, self).__init__()
...@@ -178,8 +179,7 @@ class Res2Net(fluid.dygraph.Layer): ...@@ -178,8 +179,7 @@ class Res2Net(fluid.dygraph.Layer):
stride=2, stride=2,
act='relu', act='relu',
name="conv1") name="conv1")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_list = [] self.block_list = []
for block in range(len(depth)): for block in range(len(depth)):
...@@ -207,8 +207,7 @@ class Res2Net(fluid.dygraph.Layer): ...@@ -207,8 +207,7 @@ class Res2Net(fluid.dygraph.Layer):
self.block_list.append(bottleneck_block) self.block_list.append(bottleneck_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -217,9 +216,8 @@ class Res2Net(fluid.dygraph.Layer): ...@@ -217,9 +216,8 @@ class Res2Net(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_weights"),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -228,7 +226,7 @@ class Res2Net(fluid.dygraph.Layer): ...@@ -228,7 +226,7 @@ class Res2Net(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -18,9 +18,12 @@ from __future__ import print_function ...@@ -18,9 +18,12 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -31,7 +34,7 @@ __all__ = [ ...@@ -31,7 +34,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__( def __init__(
self, self,
num_channels, num_channels,
...@@ -45,21 +48,17 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -45,21 +48,17 @@ class ConvBNLayer(fluid.dygraph.Layer):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self.is_vd_mode = is_vd_mode self.is_vd_mode = is_vd_mode
self._pool2d_avg = Pool2D( self._pool2d_avg = AvgPool2d(
pool_size=2, kernel_size=2, stride=2, padding=0, ceil_mode=True)
pool_stride=2, self._conv = Conv2d(
pool_padding=0, in_channels=num_channels,
pool_type='avg', out_channels=num_filters,
ceil_mode=True) kernel_size=filter_size,
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, act=None,
param_attr=ParamAttr(name=name + "_weights"), weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -81,7 +80,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -81,7 +80,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels1, num_channels1,
num_channels2, num_channels2,
...@@ -112,8 +111,8 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -112,8 +111,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
act='relu', act='relu',
name=name + '_branch2b_' + str(s + 1))) name=name + '_branch2b_' + str(s + 1)))
self.conv1_list.append(conv1) self.conv1_list.append(conv1)
self.pool2d_avg = Pool2D( self.pool2d_avg = AvgPool2d(
pool_size=3, pool_stride=stride, pool_padding=1, pool_type='avg') kernel_size=3, stride=stride, padding=1, ceil_mode=True)
self.conv2 = ConvBNLayer( self.conv2 = ConvBNLayer(
num_channels=num_filters, num_channels=num_filters,
...@@ -135,7 +134,7 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -135,7 +134,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
y = self.conv0(inputs) y = self.conv0(inputs)
xs = fluid.layers.split(y, self.scales, 1) xs = paddle.split(y, self.scales, 1)
ys = [] ys = []
for s, conv1 in enumerate(self.conv1_list): for s, conv1 in enumerate(self.conv1_list):
if s == 0 or self.stride == 2: if s == 0 or self.stride == 2:
...@@ -146,18 +145,18 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -146,18 +145,18 @@ class BottleneckBlock(fluid.dygraph.Layer):
ys.append(xs[-1]) ys.append(xs[-1])
else: else:
ys.append(self.pool2d_avg(xs[-1])) ys.append(self.pool2d_avg(xs[-1]))
conv1 = fluid.layers.concat(ys, axis=1) conv1 = paddle.concat(ys, axis=1)
conv2 = self.conv2(conv1) conv2 = self.conv2(conv1)
if self.shortcut: if self.shortcut:
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu') y = paddle.elementwise_add(x=short, y=conv2, act='relu')
return y return y
class Res2Net_vd(fluid.dygraph.Layer): class Res2Net_vd(nn.Layer):
def __init__(self, layers=50, scales=4, width=26, class_dim=1000): def __init__(self, layers=50, scales=4, width=26, class_dim=1000):
super(Res2Net_vd, self).__init__() super(Res2Net_vd, self).__init__()
...@@ -203,8 +202,7 @@ class Res2Net_vd(fluid.dygraph.Layer): ...@@ -203,8 +202,7 @@ class Res2Net_vd(fluid.dygraph.Layer):
stride=1, stride=1,
act='relu', act='relu',
name="conv1_3") name="conv1_3")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_list = [] self.block_list = []
for block in range(len(depth)): for block in range(len(depth)):
...@@ -232,8 +230,7 @@ class Res2Net_vd(fluid.dygraph.Layer): ...@@ -232,8 +230,7 @@ class Res2Net_vd(fluid.dygraph.Layer):
self.block_list.append(bottleneck_block) self.block_list.append(bottleneck_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -242,9 +239,8 @@ class Res2Net_vd(fluid.dygraph.Layer): ...@@ -242,9 +239,8 @@ class Res2Net_vd(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_weights"),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -255,7 +251,7 @@ class Res2Net_vd(fluid.dygraph.Layer): ...@@ -255,7 +251,7 @@ class Res2Net_vd(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -20,11 +20,11 @@ import numpy as np ...@@ -20,11 +20,11 @@ import numpy as np
import paddle import paddle
import math import math
import paddle.nn as nn import paddle.nn as nn
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr from paddle.nn.initializer import MSRA
from paddle.fluid.regularizer import L2DecayRegularizer from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.fluid.initializer import MSRA, ConstantInitializer from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout from paddle.regularizer import L2Decay
__all__ = ["ResNeSt50_fast_1s1x64d", "ResNeSt50"] __all__ = ["ResNeSt50_fast_1s1x64d", "ResNeSt50"]
...@@ -43,26 +43,23 @@ class ConvBNLayer(nn.Layer): ...@@ -43,26 +43,23 @@ class ConvBNLayer(nn.Layer):
bn_decay = 0.0 bn_decay = 0.0
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
dilation=dilation, dilation=dilation,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weight"),
param_attr=ParamAttr(name=name + "_weight"),
bias_attr=False) bias_attr=False)
self._batch_norm = BatchNorm( self._batch_norm = BatchNorm(
num_filters, num_filters,
act=act, act=act,
param_attr=ParamAttr( param_attr=ParamAttr(
name=name + "_scale", name=name + "_scale", regularizer=L2Decay(bn_decay)),
regularizer=L2DecayRegularizer(regularization_coeff=bn_decay)),
bias_attr=ParamAttr( bias_attr=ParamAttr(
name + "_offset", name + "_offset", regularizer=L2Decay(bn_decay)),
regularizer=L2DecayRegularizer(regularization_coeff=bn_decay)),
moving_mean_name=name + "_mean", moving_mean_name=name + "_mean",
moving_variance_name=name + "_variance") moving_variance_name=name + "_variance")
...@@ -124,7 +121,7 @@ class SplatConv(nn.Layer): ...@@ -124,7 +121,7 @@ class SplatConv(nn.Layer):
act="relu", act="relu",
name=name + "_splat1") name=name + "_splat1")
self.avg_pool2d = Pool2D(pool_type='avg', global_pooling=True) self.avg_pool2d = AdaptiveAvgPool2d(1)
inter_channels = int(max(in_channels * radix // reduction_factor, 32)) inter_channels = int(max(in_channels * radix // reduction_factor, 32))
...@@ -139,15 +136,14 @@ class SplatConv(nn.Layer): ...@@ -139,15 +136,14 @@ class SplatConv(nn.Layer):
name=name + "_splat2") name=name + "_splat2")
# to calc atten # to calc atten
self.conv3 = Conv2D( self.conv3 = Conv2d(
num_channels=inter_channels, in_channels=inter_channels,
num_filters=channels * radix, out_channels=channels * radix,
filter_size=1, kernel_size=1,
stride=1, stride=1,
padding=0, padding=0,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(
param_attr=ParamAttr(
name=name + "_splat_weights", initializer=MSRA()), name=name + "_splat_weights", initializer=MSRA()),
bias_attr=False) bias_attr=False)
...@@ -221,11 +217,8 @@ class BottleneckBlock(nn.Layer): ...@@ -221,11 +217,8 @@ class BottleneckBlock(nn.Layer):
name=name + "_conv1") name=name + "_conv1")
if avd and avd_first and (stride > 1 or is_first): if avd and avd_first and (stride > 1 or is_first):
self.avg_pool2d_1 = Pool2D( self.avg_pool2d_1 = AvgPool2d(
pool_size=3, kernel_size=3, stride=stride, padding=1)
pool_stride=stride,
pool_padding=1,
pool_type="avg")
if radix >= 1: if radix >= 1:
self.conv2 = SplatConv( self.conv2 = SplatConv(
...@@ -252,11 +245,8 @@ class BottleneckBlock(nn.Layer): ...@@ -252,11 +245,8 @@ class BottleneckBlock(nn.Layer):
name=name + "_conv2") name=name + "_conv2")
if avd and avd_first == False and (stride > 1 or is_first): if avd and avd_first == False and (stride > 1 or is_first):
self.avg_pool2d_2 = Pool2D( self.avg_pool2d_2 = AvgPool2d(
pool_size=3, kernel_size=3, stride=stride, padding=1)
pool_stride=stride,
pool_padding=1,
pool_type="avg")
self.conv3 = ConvBNLayer( self.conv3 = ConvBNLayer(
num_channels=group_width, num_channels=group_width,
...@@ -270,39 +260,31 @@ class BottleneckBlock(nn.Layer): ...@@ -270,39 +260,31 @@ class BottleneckBlock(nn.Layer):
if stride != 1 or self.inplanes != self.planes * 4: if stride != 1 or self.inplanes != self.planes * 4:
if avg_down: if avg_down:
if dilation == 1: if dilation == 1:
self.avg_pool2d_3 = Pool2D( self.avg_pool2d_3 = AvgPool2d(
pool_size=stride, kernel_size=stride, stride=stride, padding=0)
pool_stride=stride,
pool_type="avg",
ceil_mode=True)
else: else:
self.avg_pool2d_3 = Pool2D( self.avg_pool2d_3 = AvgPool2d(
pool_size=1, kernel_size=1, stride=1, padding=0, ceil_mode=True)
pool_stride=1,
pool_type="avg",
ceil_mode=True)
self.conv4 = Conv2D( self.conv4 = Conv2d(
num_channels=self.inplanes, in_channels=self.inplanes,
num_filters=planes * 4, out_channels=planes * 4,
filter_size=1, kernel_size=1,
stride=1, stride=1,
padding=0, padding=0,
groups=1, groups=1,
act=None, weight_attr=ParamAttr(
param_attr=ParamAttr(
name=name + "_weights", initializer=MSRA()), name=name + "_weights", initializer=MSRA()),
bias_attr=False) bias_attr=False)
else: else:
self.conv4 = Conv2D( self.conv4 = Conv2d(
num_channels=self.inplanes, in_channels=self.inplanes,
num_filters=planes * 4, out_channels=planes * 4,
filter_size=1, kernel_size=1,
stride=stride, stride=stride,
padding=0, padding=0,
groups=1, groups=1,
act=None, weight_attr=ParamAttr(
param_attr=ParamAttr(
name=name + "_shortcut_weights", initializer=MSRA()), name=name + "_shortcut_weights", initializer=MSRA()),
bias_attr=False) bias_attr=False)
...@@ -312,12 +294,10 @@ class BottleneckBlock(nn.Layer): ...@@ -312,12 +294,10 @@ class BottleneckBlock(nn.Layer):
act=None, act=None,
param_attr=ParamAttr( param_attr=ParamAttr(
name=name + "_shortcut_scale", name=name + "_shortcut_scale",
regularizer=L2DecayRegularizer( regularizer=L2Decay(regularization_coeff=bn_decay)),
regularization_coeff=bn_decay)),
bias_attr=ParamAttr( bias_attr=ParamAttr(
name + "_shortcut_offset", name + "_shortcut_offset",
regularizer=L2DecayRegularizer( regularizer=L2Decay(regularization_coeff=bn_decay)),
regularization_coeff=bn_decay)),
moving_mean_name=name + "_shortcut_mean", moving_mean_name=name + "_shortcut_mean",
moving_variance_name=name + "_shortcut_variance") moving_variance_name=name + "_shortcut_variance")
...@@ -515,8 +495,7 @@ class ResNeSt(nn.Layer): ...@@ -515,8 +495,7 @@ class ResNeSt(nn.Layer):
act="relu", act="relu",
name="conv1") name="conv1")
self.max_pool2d = Pool2D( self.max_pool2d = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type="max")
self.layer1 = ResNeStLayer( self.layer1 = ResNeStLayer(
inplanes=self.stem_width * 2 inplanes=self.stem_width * 2
...@@ -645,7 +624,7 @@ class ResNeSt(nn.Layer): ...@@ -645,7 +624,7 @@ class ResNeSt(nn.Layer):
stride=2, stride=2,
name="layer4") name="layer4")
self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) self.pool2d_avg = AdaptiveAvgPool2d(1)
self.out_channels = 2048 self.out_channels = 2048
...@@ -654,7 +633,7 @@ class ResNeSt(nn.Layer): ...@@ -654,7 +633,7 @@ class ResNeSt(nn.Layer):
self.out = Linear( self.out = Linear(
self.out_channels, self.out_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=nn.initializer.Uniform(-stdv, stdv), initializer=nn.initializer.Uniform(-stdv, stdv),
name="fc_weights"), name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
......
...@@ -18,16 +18,18 @@ from __future__ import print_function ...@@ -18,16 +18,18 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
__all__ = ["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"] __all__ = ["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -38,15 +40,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -38,15 +40,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -66,7 +67,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -66,7 +67,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -117,11 +118,11 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -117,11 +118,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2, act="relu") y = paddle.elementwise_add(x=short, y=conv2, act="relu")
return y return y
class BasicBlock(fluid.dygraph.Layer): class BasicBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -162,11 +163,11 @@ class BasicBlock(fluid.dygraph.Layer): ...@@ -162,11 +163,11 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv1, act="relu") y = paddle.elementwise_add(x=short, y=conv1, act="relu")
return y return y
class ResNet(fluid.dygraph.Layer): class ResNet(nn.Layer):
def __init__(self, layers=50, class_dim=1000): def __init__(self, layers=50, class_dim=1000):
super(ResNet, self).__init__() super(ResNet, self).__init__()
...@@ -195,8 +196,7 @@ class ResNet(fluid.dygraph.Layer): ...@@ -195,8 +196,7 @@ class ResNet(fluid.dygraph.Layer):
stride=2, stride=2,
act="relu", act="relu",
name="conv1") name="conv1")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type="max")
self.block_list = [] self.block_list = []
if layers >= 50: if layers >= 50:
...@@ -238,8 +238,7 @@ class ResNet(fluid.dygraph.Layer): ...@@ -238,8 +238,7 @@ class ResNet(fluid.dygraph.Layer):
self.block_list.append(basic_block) self.block_list.append(basic_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -248,9 +247,8 @@ class ResNet(fluid.dygraph.Layer): ...@@ -248,9 +247,8 @@ class ResNet(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_0.w_0"),
name="fc_0.w_0"),
bias_attr=ParamAttr(name="fc_0.b_0")) bias_attr=ParamAttr(name="fc_0.b_0"))
def forward(self, inputs): def forward(self, inputs):
...@@ -259,7 +257,7 @@ class ResNet(fluid.dygraph.Layer): ...@@ -259,7 +257,7 @@ class ResNet(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -18,9 +18,11 @@ from __future__ import print_function ...@@ -18,9 +18,11 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -29,7 +31,7 @@ __all__ = [ ...@@ -29,7 +31,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -40,15 +42,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -40,15 +42,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -68,7 +69,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -68,7 +69,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -119,11 +120,11 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -119,11 +120,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu') y = paddle.elementwise_add(x=short, y=conv2, act='relu')
return y return y
class BasicBlock(fluid.dygraph.Layer): class BasicBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -164,11 +165,11 @@ class BasicBlock(fluid.dygraph.Layer): ...@@ -164,11 +165,11 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv1, act='relu') y = paddle.elementwise_add(x=short, y=conv1, act='relu')
return y return y
class ResNet_vc(fluid.dygraph.Layer): class ResNet_vc(nn.Layer):
def __init__(self, layers=50, class_dim=1000): def __init__(self, layers=50, class_dim=1000):
super(ResNet_vc, self).__init__() super(ResNet_vc, self).__init__()
...@@ -212,8 +213,7 @@ class ResNet_vc(fluid.dygraph.Layer): ...@@ -212,8 +213,7 @@ class ResNet_vc(fluid.dygraph.Layer):
act='relu', act='relu',
name="conv1_3") name="conv1_3")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_list = [] self.block_list = []
if layers >= 50: if layers >= 50:
...@@ -255,8 +255,7 @@ class ResNet_vc(fluid.dygraph.Layer): ...@@ -255,8 +255,7 @@ class ResNet_vc(fluid.dygraph.Layer):
self.block_list.append(basic_block) self.block_list.append(basic_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -265,9 +264,8 @@ class ResNet_vc(fluid.dygraph.Layer): ...@@ -265,9 +264,8 @@ class ResNet_vc(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_0.w_0"),
name="fc_0.w_0"),
bias_attr=ParamAttr(name="fc_0.b_0")) bias_attr=ParamAttr(name="fc_0.b_0"))
def forward(self, inputs): def forward(self, inputs):
...@@ -278,7 +276,7 @@ class ResNet_vc(fluid.dygraph.Layer): ...@@ -278,7 +276,7 @@ class ResNet_vc(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -18,9 +18,11 @@ from __future__ import print_function ...@@ -18,9 +18,11 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -29,7 +31,7 @@ __all__ = [ ...@@ -29,7 +31,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__( def __init__(
self, self,
num_channels, num_channels,
...@@ -43,21 +45,16 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -43,21 +45,16 @@ class ConvBNLayer(fluid.dygraph.Layer):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self.is_vd_mode = is_vd_mode self.is_vd_mode = is_vd_mode
self._pool2d_avg = Pool2D( self._pool2d_avg = AvgPool2d(
pool_size=2, kernel_size=2, stride=2, padding=0, ceil_mode=True)
pool_stride=2, self._conv = Conv2d(
pool_padding=0, in_channels=num_channels,
pool_type='avg', out_channels=num_filters,
ceil_mode=True) kernel_size=filter_size,
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -79,7 +76,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -79,7 +76,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -129,11 +126,11 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -129,11 +126,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu') y = paddle.elementwise_add(x=short, y=conv2, act='relu')
return y return y
class BasicBlock(fluid.dygraph.Layer): class BasicBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -176,11 +173,11 @@ class BasicBlock(fluid.dygraph.Layer): ...@@ -176,11 +173,11 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv1, act='relu') y = paddle.elementwise_add(x=short, y=conv1, act='relu')
return y return y
class ResNet_vd(fluid.dygraph.Layer): class ResNet_vd(nn.Layer):
def __init__(self, layers=50, class_dim=1000): def __init__(self, layers=50, class_dim=1000):
super(ResNet_vd, self).__init__() super(ResNet_vd, self).__init__()
...@@ -225,8 +222,7 @@ class ResNet_vd(fluid.dygraph.Layer): ...@@ -225,8 +222,7 @@ class ResNet_vd(fluid.dygraph.Layer):
stride=1, stride=1,
act='relu', act='relu',
name="conv1_3") name="conv1_3")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_list = [] self.block_list = []
if layers >= 50: if layers >= 50:
...@@ -270,8 +266,7 @@ class ResNet_vd(fluid.dygraph.Layer): ...@@ -270,8 +266,7 @@ class ResNet_vd(fluid.dygraph.Layer):
self.block_list.append(basic_block) self.block_list.append(basic_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -280,9 +275,8 @@ class ResNet_vd(fluid.dygraph.Layer): ...@@ -280,9 +275,8 @@ class ResNet_vd(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_0.w_0"),
name="fc_0.w_0"),
bias_attr=ParamAttr(name="fc_0.b_0")) bias_attr=ParamAttr(name="fc_0.b_0"))
def forward(self, inputs): def forward(self, inputs):
...@@ -293,7 +287,7 @@ class ResNet_vd(fluid.dygraph.Layer): ...@@ -293,7 +287,7 @@ class ResNet_vd(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -18,9 +18,11 @@ from __future__ import print_function ...@@ -18,9 +18,11 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -30,7 +32,7 @@ __all__ = [ ...@@ -30,7 +32,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -41,15 +43,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -41,15 +43,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -69,7 +70,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -69,7 +70,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -121,11 +122,11 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -121,11 +122,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu') y = paddle.elementwise_add(x=short, y=conv2, act='relu')
return y return y
class ResNeXt(fluid.dygraph.Layer): class ResNeXt(nn.Layer):
def __init__(self, layers=50, class_dim=1000, cardinality=32): def __init__(self, layers=50, class_dim=1000, cardinality=32):
super(ResNeXt, self).__init__() super(ResNeXt, self).__init__()
...@@ -156,8 +157,7 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -156,8 +157,7 @@ class ResNeXt(fluid.dygraph.Layer):
stride=2, stride=2,
act='relu', act='relu',
name="res_conv1") name="res_conv1")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_list = [] self.block_list = []
for block in range(len(depth)): for block in range(len(depth)):
...@@ -183,8 +183,7 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -183,8 +183,7 @@ class ResNeXt(fluid.dygraph.Layer):
self.block_list.append(bottleneck_block) self.block_list.append(bottleneck_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -193,9 +192,8 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -193,9 +192,8 @@ class ResNeXt(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_weights"),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -204,7 +202,7 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -204,7 +202,7 @@ class ResNeXt(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
__all__ = ["ResNeXt101_32x8d_wsl", __all__ = [
"ResNeXt101_wsl_32x16d_wsl", "ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl", "ResNeXt101_32x32d_wsl",
"ResNeXt101_wsl_32x32d_wsl", "ResNeXt101_32x48d_wsl"
"ResNeXt101_wsl_32x48d_wsl"] ]
class ConvBNLayer(fluid.dygraph.Layer):
class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -22,14 +26,14 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -22,14 +26,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
conv_name = name + ".0" conv_name = name + ".0"
else: else:
conv_name = name conv_name = name
self._conv = Conv2D(num_channels=input_channels, self._conv = Conv2d(
num_filters=output_channels, in_channels=input_channels,
filter_size=filter_size, out_channels=output_channels,
kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size-1)//2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=conv_name + ".weight"),
param_attr=ParamAttr(name=conv_name + ".weight"),
bias_attr=False) bias_attr=False)
if "downsample" in name: if "downsample" in name:
bn_name = name[:9] + "downsample.1" bn_name = name[:9] + "downsample.1"
...@@ -37,8 +41,10 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -37,8 +41,10 @@ class ConvBNLayer(fluid.dygraph.Layer):
if "conv1" == name: if "conv1" == name:
bn_name = "bn" + name[-1] bn_name = "bn" + name[-1]
else: else:
bn_name = (name[:10] if name[7:9].isdigit() else name[:9]) + "bn" + name[-1] bn_name = (name[:10] if name[7:9].isdigit() else name[:9]
self._bn = BatchNorm(num_channels=output_channels, ) + "bn" + name[-1]
self._bn = BatchNorm(
num_channels=output_channels,
act=act, act=act,
param_attr=ParamAttr(name=bn_name + ".weight"), param_attr=ParamAttr(name=bn_name + ".weight"),
bias_attr=ParamAttr(name=bn_name + ".bias"), bias_attr=ParamAttr(name=bn_name + ".bias"),
...@@ -50,43 +56,68 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -50,43 +56,68 @@ class ConvBNLayer(fluid.dygraph.Layer):
x = self._bn(x) x = self._bn(x)
return x return x
class ShortCut(fluid.dygraph.Layer):
class ShortCut(nn.Layer):
def __init__(self, input_channels, output_channels, stride, name=None): def __init__(self, input_channels, output_channels, stride, name=None):
super(ShortCut, self).__init__() super(ShortCut, self).__init__()
self.input_channels = input_channels self.input_channels = input_channels
self.output_channels = output_channels self.output_channels = output_channels
self.stride = stride self.stride = stride
if input_channels!=output_channels or stride!=1: if input_channels != output_channels or stride != 1:
self._conv = ConvBNLayer( self._conv = ConvBNLayer(
input_channels, output_channels, filter_size=1, stride=stride, name=name) input_channels,
output_channels,
filter_size=1,
stride=stride,
name=name)
def forward(self, inputs): def forward(self, inputs):
if self.input_channels!= self.output_channels or self.stride!=1: if self.input_channels != self.output_channels or self.stride != 1:
return self._conv(inputs) return self._conv(inputs)
return inputs return inputs
class BottleneckBlock(fluid.dygraph.Layer):
def __init__(self, input_channels, output_channels, stride, cardinality, width, name): class BottleneckBlock(nn.Layer):
def __init__(self, input_channels, output_channels, stride, cardinality,
width, name):
super(BottleneckBlock, self).__init__() super(BottleneckBlock, self).__init__()
self._conv0 = ConvBNLayer( self._conv0 = ConvBNLayer(
input_channels, output_channels, filter_size=1, act="relu", name=name + ".conv1") input_channels,
output_channels,
filter_size=1,
act="relu",
name=name + ".conv1")
self._conv1 = ConvBNLayer( self._conv1 = ConvBNLayer(
output_channels, output_channels, filter_size=3, act="relu", stride=stride, groups=cardinality, name=name + ".conv2") output_channels,
output_channels,
filter_size=3,
act="relu",
stride=stride,
groups=cardinality,
name=name + ".conv2")
self._conv2 = ConvBNLayer( self._conv2 = ConvBNLayer(
output_channels, output_channels//(width//8), filter_size=1, act=None, name=name + ".conv3") output_channels,
output_channels // (width // 8),
filter_size=1,
act=None,
name=name + ".conv3")
self._short = ShortCut( self._short = ShortCut(
input_channels, output_channels//(width//8), stride=stride, name=name + ".downsample") input_channels,
output_channels // (width // 8),
stride=stride,
name=name + ".downsample")
def forward(self, inputs): def forward(self, inputs):
x = self._conv0(inputs) x = self._conv0(inputs)
x = self._conv1(x) x = self._conv1(x)
x = self._conv2(x) x = self._conv2(x)
y = self._short(inputs) y = self._short(inputs)
return fluid.layers.elementwise_add(x, y, act="relu") return paddle.elementwise_add(x, y, act="relu")
class ResNeXt101WSL(fluid.dygraph.Layer): class ResNeXt101WSL(nn.Layer):
def __init__(self, layers=101, cardinality=32, width=48, class_dim=1000): def __init__(self, layers=101, cardinality=32, width=48, class_dim=1000):
super(ResNeXt101WSL, self).__init__() super(ResNeXt101WSL, self).__init__()
...@@ -95,92 +126,256 @@ class ResNeXt101WSL(fluid.dygraph.Layer): ...@@ -95,92 +126,256 @@ class ResNeXt101WSL(fluid.dygraph.Layer):
self.layers = layers self.layers = layers
self.cardinality = cardinality self.cardinality = cardinality
self.width = width self.width = width
self.scale = width//8 self.scale = width // 8
self.depth = [3, 4, 23, 3] self.depth = [3, 4, 23, 3]
self.base_width = cardinality * width self.base_width = cardinality * width
num_filters = [self.base_width*i for i in [1,2,4,8]] #[256, 512, 1024, 2048] num_filters = [self.base_width * i
for i in [1, 2, 4, 8]] # [256, 512, 1024, 2048]
self._conv_stem = ConvBNLayer( self._conv_stem = ConvBNLayer(
3, 64, 7, stride=2, act="relu", name="conv1") 3, 64, 7, stride=2, act="relu", name="conv1")
self._pool = Pool2D(pool_size=3, self._pool = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_stride=2,
pool_padding=1,
pool_type="max")
self._conv1_0 = BottleneckBlock( self._conv1_0 = BottleneckBlock(
64, num_filters[0], stride=1, cardinality=self.cardinality, width=self.width, name="layer1.0") 64,
num_filters[0],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer1.0")
self._conv1_1 = BottleneckBlock( self._conv1_1 = BottleneckBlock(
num_filters[0]//(width//8), num_filters[0], stride=1, cardinality=self.cardinality, width=self.width, name="layer1.1") num_filters[0] // (width // 8),
num_filters[0],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer1.1")
self._conv1_2 = BottleneckBlock( self._conv1_2 = BottleneckBlock(
num_filters[0]//(width//8), num_filters[0], stride=1, cardinality=self.cardinality, width=self.width, name="layer1.2") num_filters[0] // (width // 8),
num_filters[0],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer1.2")
self._conv2_0 = BottleneckBlock( self._conv2_0 = BottleneckBlock(
num_filters[0]//(width//8), num_filters[1], stride=2, cardinality=self.cardinality, width=self.width, name="layer2.0") num_filters[0] // (width // 8),
num_filters[1],
stride=2,
cardinality=self.cardinality,
width=self.width,
name="layer2.0")
self._conv2_1 = BottleneckBlock( self._conv2_1 = BottleneckBlock(
num_filters[1]//(width//8), num_filters[1], stride=1, cardinality=self.cardinality, width=self.width, name="layer2.1") num_filters[1] // (width // 8),
num_filters[1],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer2.1")
self._conv2_2 = BottleneckBlock( self._conv2_2 = BottleneckBlock(
num_filters[1]//(width//8), num_filters[1], stride=1, cardinality=self.cardinality, width=self.width, name="layer2.2") num_filters[1] // (width // 8),
num_filters[1],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer2.2")
self._conv2_3 = BottleneckBlock( self._conv2_3 = BottleneckBlock(
num_filters[1]//(width//8), num_filters[1], stride=1, cardinality=self.cardinality, width=self.width, name="layer2.3") num_filters[1] // (width // 8),
num_filters[1],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer2.3")
self._conv3_0 = BottleneckBlock( self._conv3_0 = BottleneckBlock(
num_filters[1]//(width//8), num_filters[2], stride=2, cardinality=self.cardinality, width=self.width, name="layer3.0") num_filters[1] // (width // 8),
num_filters[2],
stride=2,
cardinality=self.cardinality,
width=self.width,
name="layer3.0")
self._conv3_1 = BottleneckBlock( self._conv3_1 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.1") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.1")
self._conv3_2 = BottleneckBlock( self._conv3_2 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.2") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.2")
self._conv3_3 = BottleneckBlock( self._conv3_3 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.3") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.3")
self._conv3_4 = BottleneckBlock( self._conv3_4 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.4") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.4")
self._conv3_5 = BottleneckBlock( self._conv3_5 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.5") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.5")
self._conv3_6 = BottleneckBlock( self._conv3_6 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.6") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.6")
self._conv3_7 = BottleneckBlock( self._conv3_7 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.7") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.7")
self._conv3_8 = BottleneckBlock( self._conv3_8 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.8") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.8")
self._conv3_9 = BottleneckBlock( self._conv3_9 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.9") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.9")
self._conv3_10 = BottleneckBlock( self._conv3_10 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.10") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.10")
self._conv3_11 = BottleneckBlock( self._conv3_11 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.11") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.11")
self._conv3_12 = BottleneckBlock( self._conv3_12 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.12") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.12")
self._conv3_13 = BottleneckBlock( self._conv3_13 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.13") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.13")
self._conv3_14 = BottleneckBlock( self._conv3_14 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.14") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.14")
self._conv3_15 = BottleneckBlock( self._conv3_15 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.15") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.15")
self._conv3_16 = BottleneckBlock( self._conv3_16 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.16") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.16")
self._conv3_17 = BottleneckBlock( self._conv3_17 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.17") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.17")
self._conv3_18 = BottleneckBlock( self._conv3_18 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.18") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.18")
self._conv3_19 = BottleneckBlock( self._conv3_19 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.19") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.19")
self._conv3_20 = BottleneckBlock( self._conv3_20 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.20") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.20")
self._conv3_21 = BottleneckBlock( self._conv3_21 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.21") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.21")
self._conv3_22 = BottleneckBlock( self._conv3_22 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[2], stride=1, cardinality=self.cardinality, width=self.width, name="layer3.22") num_filters[2] // (width // 8),
num_filters[2],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer3.22")
self._conv4_0 = BottleneckBlock( self._conv4_0 = BottleneckBlock(
num_filters[2]//(width//8), num_filters[3], stride=2, cardinality=self.cardinality, width=self.width, name="layer4.0") num_filters[2] // (width // 8),
num_filters[3],
stride=2,
cardinality=self.cardinality,
width=self.width,
name="layer4.0")
self._conv4_1 = BottleneckBlock( self._conv4_1 = BottleneckBlock(
num_filters[3]//(width//8), num_filters[3], stride=1, cardinality=self.cardinality, width=self.width, name="layer4.1") num_filters[3] // (width // 8),
num_filters[3],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer4.1")
self._conv4_2 = BottleneckBlock( self._conv4_2 = BottleneckBlock(
num_filters[3]//(width//8), num_filters[3], stride=1, cardinality=self.cardinality, width=self.width, name="layer4.2") num_filters[3] // (width // 8),
num_filters[3],
stride=1,
cardinality=self.cardinality,
width=self.width,
name="layer4.2")
self._avg_pool = Pool2D(pool_type="avg", global_pooling=True) self._avg_pool = AdaptiveAvgPool2d(1)
self._out = Linear(input_dim=num_filters[3]//(width//8), self._out = Linear(
output_dim=class_dim, num_filters[3] // (width // 8),
param_attr=ParamAttr(name="fc.weight"), class_dim,
weight_attr=ParamAttr(name="fc.weight"),
bias_attr=ParamAttr(name="fc.bias")) bias_attr=ParamAttr(name="fc.bias"))
def forward(self, inputs): def forward(self, inputs):
...@@ -225,22 +420,26 @@ class ResNeXt101WSL(fluid.dygraph.Layer): ...@@ -225,22 +420,26 @@ class ResNeXt101WSL(fluid.dygraph.Layer):
x = self._conv4_2(x) x = self._conv4_2(x)
x = self._avg_pool(x) x = self._avg_pool(x)
x = fluid.layers.squeeze(x, axes=[2, 3]) x = paddle.squeeze(x, axis=[2, 3])
x = self._out(x) x = self._out(x)
return x return x
def ResNeXt101_32x8d_wsl(**args): def ResNeXt101_32x8d_wsl(**args):
model = ResNeXt101WSL(cardinality=32, width=8, **args) model = ResNeXt101WSL(cardinality=32, width=8, **args)
return model return model
def ResNeXt101_32x16d_wsl(**args): def ResNeXt101_32x16d_wsl(**args):
model = ResNeXt101WSL(cardinality=32, width=16, **args) model = ResNeXt101WSL(cardinality=32, width=16, **args)
return model return model
def ResNeXt101_32x32d_wsl(**args): def ResNeXt101_32x32d_wsl(**args):
model = ResNeXt101WSL(cardinality=32, width=32, **args) model = ResNeXt101WSL(cardinality=32, width=32, **args)
return model return model
def ResNeXt101_32x48d_wsl(**args): def ResNeXt101_32x48d_wsl(**args):
model = ResNeXt101WSL(cardinality=32, width=48, **args) model = ResNeXt101WSL(cardinality=32, width=48, **args)
return model return model
...@@ -18,9 +18,11 @@ from __future__ import print_function ...@@ -18,9 +18,11 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -30,7 +32,7 @@ __all__ = [ ...@@ -30,7 +32,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__( def __init__(
self, self,
num_channels, num_channels,
...@@ -44,21 +46,16 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -44,21 +46,16 @@ class ConvBNLayer(fluid.dygraph.Layer):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self.is_vd_mode = is_vd_mode self.is_vd_mode = is_vd_mode
self._pool2d_avg = Pool2D( self._pool2d_avg = AvgPool2d(
pool_size=2, kernel_size=2, stride=2, padding=0, ceil_mode=True)
pool_stride=2, self._conv = Conv2d(
pool_padding=0, in_channels=num_channels,
pool_type='avg', out_channels=num_filters,
ceil_mode=True) kernel_size=filter_size,
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -80,7 +77,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -80,7 +77,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -134,11 +131,11 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -134,11 +131,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu') y = paddle.elementwise_add(x=short, y=conv2, act='relu')
return y return y
class ResNeXt(fluid.dygraph.Layer): class ResNeXt(nn.Layer):
def __init__(self, layers=50, class_dim=1000, cardinality=32): def __init__(self, layers=50, class_dim=1000, cardinality=32):
super(ResNeXt, self).__init__() super(ResNeXt, self).__init__()
...@@ -184,8 +181,7 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -184,8 +181,7 @@ class ResNeXt(fluid.dygraph.Layer):
act='relu', act='relu',
name="conv1_3") name="conv1_3")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_list = [] self.block_list = []
for block in range(len(depth)): for block in range(len(depth)):
...@@ -212,8 +208,7 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -212,8 +208,7 @@ class ResNeXt(fluid.dygraph.Layer):
self.block_list.append(bottleneck_block) self.block_list.append(bottleneck_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -222,9 +217,8 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -222,9 +217,8 @@ class ResNeXt(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc_weights"),
name="fc_weights"),
bias_attr=ParamAttr(name="fc_offset")) bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -235,7 +229,7 @@ class ResNeXt(fluid.dygraph.Layer): ...@@ -235,7 +229,7 @@ class ResNeXt(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -17,9 +17,12 @@ from __future__ import print_function ...@@ -17,9 +17,12 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import Uniform
import math import math
...@@ -29,7 +32,7 @@ __all__ = [ ...@@ -29,7 +32,7 @@ __all__ = [
] ]
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__( def __init__(
self, self,
num_channels, num_channels,
...@@ -43,21 +46,17 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -43,21 +46,17 @@ class ConvBNLayer(fluid.dygraph.Layer):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self.is_vd_mode = is_vd_mode self.is_vd_mode = is_vd_mode
self._pool2d_avg = Pool2D( self._pool2d_avg = AvgPool2d(
pool_size=2, kernel_size=2, stride=2, padding=0, ceil_mode=True)
pool_stride=2,
pool_padding=0, self._conv = Conv2d(
pool_type='avg', in_channels=num_channels,
ceil_mode=True) out_channels=num_filters,
self._conv = Conv2D( kernel_size=filter_size,
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
if name == "conv1": if name == "conv1":
bn_name = "bn_" + name bn_name = "bn_" + name
...@@ -79,7 +78,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -79,7 +78,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return y return y
class BottleneckBlock(fluid.dygraph.Layer): class BottleneckBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -136,11 +135,11 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -136,11 +135,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=scale, act='relu') y = paddle.elementwise_add(x=short, y=scale, act='relu')
return y return y
class BasicBlock(fluid.dygraph.Layer): class BasicBlock(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -191,15 +190,15 @@ class BasicBlock(fluid.dygraph.Layer): ...@@ -191,15 +190,15 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs short = inputs
else: else:
short = self.short(inputs) short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=scale, act='relu') y = paddle.elementwise_add(x=short, y=scale, act='relu')
return y return y
class SELayer(fluid.dygraph.Layer): class SELayer(nn.Layer):
def __init__(self, num_channels, num_filters, reduction_ratio, name=None): def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
super(SELayer, self).__init__() super(SELayer, self).__init__()
self.pool2d_gap = Pool2D(pool_type='avg', global_pooling=True) self.pool2d_gap = AdaptiveAvgPool2d(1)
self._num_channels = num_channels self._num_channels = num_channels
...@@ -208,34 +207,32 @@ class SELayer(fluid.dygraph.Layer): ...@@ -208,34 +207,32 @@ class SELayer(fluid.dygraph.Layer):
self.squeeze = Linear( self.squeeze = Linear(
num_channels, num_channels,
med_ch, med_ch,
act="relu", weight_attr=ParamAttr(
param_attr=ParamAttr( initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"),
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + "_sqz_weights"),
bias_attr=ParamAttr(name=name + '_sqz_offset')) bias_attr=ParamAttr(name=name + '_sqz_offset'))
stdv = 1.0 / math.sqrt(med_ch * 1.0) stdv = 1.0 / math.sqrt(med_ch * 1.0)
self.excitation = Linear( self.excitation = Linear(
med_ch, med_ch,
num_filters, num_filters,
act="sigmoid", weight_attr=ParamAttr(
param_attr=ParamAttr( initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"),
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + "_exc_weights"),
bias_attr=ParamAttr(name=name + '_exc_offset')) bias_attr=ParamAttr(name=name + '_exc_offset'))
def forward(self, input): def forward(self, input):
pool = self.pool2d_gap(input) pool = self.pool2d_gap(input)
pool = fluid.layers.reshape(pool, shape=[-1, self._num_channels]) pool = paddle.reshape(pool, shape=[-1, self._num_channels])
squeeze = self.squeeze(pool) squeeze = self.squeeze(pool)
squeeze = F.relu(squeeze)
excitation = self.excitation(squeeze) excitation = self.excitation(squeeze)
excitation = fluid.layers.reshape( excitation = F.sigmoid(excitation)
excitation = paddle.reshape(
excitation, shape=[-1, self._num_channels, 1, 1]) excitation, shape=[-1, self._num_channels, 1, 1])
out = input * excitation out = input * excitation
return out return out
class SE_ResNet_vd(fluid.dygraph.Layer): class SE_ResNet_vd(nn.Layer):
def __init__(self, layers=50, class_dim=1000): def __init__(self, layers=50, class_dim=1000):
super(SE_ResNet_vd, self).__init__() super(SE_ResNet_vd, self).__init__()
...@@ -280,8 +277,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer): ...@@ -280,8 +277,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
stride=1, stride=1,
act='relu', act='relu',
name="conv1_3") name="conv1_3")
self.pool2d_max = Pool2D( self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.block_list = [] self.block_list = []
if layers >= 50: if layers >= 50:
...@@ -325,8 +321,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer): ...@@ -325,8 +321,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
self.block_list.append(basic_block) self.block_list.append(basic_block)
shortcut = True shortcut = True
self.pool2d_avg = Pool2D( self.pool2d_avg = AdaptiveAvgPool2d(1)
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_channels = num_channels[-1] * 2 self.pool2d_avg_channels = num_channels[-1] * 2
...@@ -335,9 +330,8 @@ class SE_ResNet_vd(fluid.dygraph.Layer): ...@@ -335,9 +330,8 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
self.out = Linear( self.out = Linear(
self.pool2d_avg_channels, self.pool2d_avg_channels,
class_dim, class_dim,
param_attr=ParamAttr( weight_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), initializer=Uniform(-stdv, stdv), name="fc6_weights"),
name="fc6_weights"),
bias_attr=ParamAttr(name="fc6_offset")) bias_attr=ParamAttr(name="fc6_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -348,7 +342,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer): ...@@ -348,7 +342,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
for block in self.block_list: for block in self.block_list:
y = block(y) y = block(y)
y = self.pool2d_avg(y) y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels]) y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
y = self.out(y) y = self.out(y)
return y return y
......
...@@ -18,15 +18,17 @@ from __future__ import print_function ...@@ -18,15 +18,17 @@ from __future__ import print_function
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.fluid.initializer import MSRA from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
from paddle.nn.initializer import MSRA
import math import math
__all__ = [ __all__ = [
"ShuffleNetV2_x0_25", "ShuffleNetV2_x0_33", "ShuffleNetV2_x0_5", "ShuffleNetV2_x0_25", "ShuffleNetV2_x0_33", "ShuffleNetV2_x0_5",
"ShuffleNetV2_x1_0", "ShuffleNetV2_x1_5", "ShuffleNetV2_x2_0", "ShuffleNetV2", "ShuffleNetV2_x1_5", "ShuffleNetV2_x2_0",
"ShuffleNetV2_swish" "ShuffleNetV2_swish"
] ]
...@@ -37,17 +39,16 @@ def channel_shuffle(x, groups): ...@@ -37,17 +39,16 @@ def channel_shuffle(x, groups):
channels_per_group = num_channels // groups channels_per_group = num_channels // groups
# reshape # reshape
x = fluid.layers.reshape( x = paddle.reshape(
x=x, shape=[batchsize, groups, channels_per_group, height, width]) x=x, shape=[batchsize, groups, channels_per_group, height, width])
x = fluid.layers.transpose(x=x, perm=[0, 2, 1, 3, 4]) x = paddle.transpose(x=x, perm=[0, 2, 1, 3, 4])
# flatten # flatten
x = fluid.layers.reshape( x = paddle.reshape(x=x, shape=[batchsize, num_channels, height, width])
x=x, shape=[batchsize, num_channels, height, width])
return x return x
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
filter_size, filter_size,
...@@ -58,24 +59,21 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -58,24 +59,21 @@ class ConvBNLayer(fluid.dygraph.Layer):
num_groups=1, num_groups=1,
if_act=True, if_act=True,
act='relu', act='relu',
name=None, name=None):
use_cudnn=True):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._if_act = if_act self._if_act = if_act
assert act in ['relu', 'swish'], \ assert act in ['relu', 'swish'], \
"supported act are {} but your act is {}".format( "supported act are {} but your act is {}".format(
['relu', 'swish'], act) ['relu', 'swish'], act)
self._act = act self._act = act
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
groups=num_groups, groups=num_groups,
act=None, weight_attr=ParamAttr(
use_cudnn=use_cudnn,
param_attr=ParamAttr(
initializer=MSRA(), name=name + "_weights"), initializer=MSRA(), name=name + "_weights"),
bias_attr=False) bias_attr=False)
...@@ -90,12 +88,11 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -90,12 +88,11 @@ class ConvBNLayer(fluid.dygraph.Layer):
y = self._conv(inputs) y = self._conv(inputs)
y = self._batch_norm(y) y = self._batch_norm(y)
if self._if_act: if self._if_act:
y = fluid.layers.relu( y = F.relu(y) if self._act == 'relu' else F.swish(y)
y) if self._act == 'relu' else fluid.layers.swish(y)
return y return y
class InvertedResidualUnit(fluid.dygraph.Layer): class InvertedResidualUnit(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -130,7 +127,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer): ...@@ -130,7 +127,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
num_groups=oup_inc, num_groups=oup_inc,
if_act=False, if_act=False,
act=act, act=act,
use_cudnn=False,
name='stage_' + name + '_conv2') name='stage_' + name + '_conv2')
self._conv_linear = ConvBNLayer( self._conv_linear = ConvBNLayer(
num_channels=oup_inc, num_channels=oup_inc,
...@@ -153,7 +149,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer): ...@@ -153,7 +149,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
num_groups=inp, num_groups=inp,
if_act=False, if_act=False,
act=act, act=act,
use_cudnn=False,
name='stage_' + name + '_conv4') name='stage_' + name + '_conv4')
self._conv_linear_1 = ConvBNLayer( self._conv_linear_1 = ConvBNLayer(
num_channels=inp, num_channels=inp,
...@@ -185,7 +180,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer): ...@@ -185,7 +180,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
num_groups=oup_inc, num_groups=oup_inc,
if_act=False, if_act=False,
act=act, act=act,
use_cudnn=False,
name='stage_' + name + '_conv2') name='stage_' + name + '_conv2')
self._conv_linear_2 = ConvBNLayer( self._conv_linear_2 = ConvBNLayer(
num_channels=oup_inc, num_channels=oup_inc,
...@@ -200,14 +194,14 @@ class InvertedResidualUnit(fluid.dygraph.Layer): ...@@ -200,14 +194,14 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
if self.benchmodel == 1: if self.benchmodel == 1:
x1, x2 = fluid.layers.split( x1, x2 = paddle.split(
inputs, inputs,
num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2], num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2],
dim=1) axis=1)
x2 = self._conv_pw(x2) x2 = self._conv_pw(x2)
x2 = self._conv_dw(x2) x2 = self._conv_dw(x2)
x2 = self._conv_linear(x2) x2 = self._conv_linear(x2)
out = fluid.layers.concat([x1, x2], axis=1) out = paddle.concat([x1, x2], axis=1)
else: else:
x1 = self._conv_dw_1(inputs) x1 = self._conv_dw_1(inputs)
x1 = self._conv_linear_1(x1) x1 = self._conv_linear_1(x1)
...@@ -215,12 +209,12 @@ class InvertedResidualUnit(fluid.dygraph.Layer): ...@@ -215,12 +209,12 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
x2 = self._conv_pw_2(inputs) x2 = self._conv_pw_2(inputs)
x2 = self._conv_dw_2(x2) x2 = self._conv_dw_2(x2)
x2 = self._conv_linear_2(x2) x2 = self._conv_linear_2(x2)
out = fluid.layers.concat([x1, x2], axis=1) out = paddle.concat([x1, x2], axis=1)
return channel_shuffle(out, 2) return channel_shuffle(out, 2)
class ShuffleNet(fluid.dygraph.Layer): class ShuffleNet(nn.Layer):
def __init__(self, class_dim=1000, scale=1.0, act='relu'): def __init__(self, class_dim=1000, scale=1.0, act='relu'):
super(ShuffleNet, self).__init__() super(ShuffleNet, self).__init__()
self.scale = scale self.scale = scale
...@@ -252,8 +246,7 @@ class ShuffleNet(fluid.dygraph.Layer): ...@@ -252,8 +246,7 @@ class ShuffleNet(fluid.dygraph.Layer):
if_act=True, if_act=True,
act=act, act=act,
name='stage1_conv') name='stage1_conv')
self._max_pool = Pool2D( self._max_pool = MaxPool2d(kernel_size=3, stride=2, padding=1)
pool_type='max', pool_size=3, pool_stride=2, pool_padding=1)
# 2. bottleneck sequences # 2. bottleneck sequences
self._block_list = [] self._block_list = []
...@@ -298,13 +291,13 @@ class ShuffleNet(fluid.dygraph.Layer): ...@@ -298,13 +291,13 @@ class ShuffleNet(fluid.dygraph.Layer):
name='conv5') name='conv5')
# 4. pool # 4. pool
self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) self._pool2d_avg = AdaptiveAvgPool2d(1)
self._out_c = stage_out_channels[-1] self._out_c = stage_out_channels[-1]
# 5. fc # 5. fc
self._fc = Linear( self._fc = Linear(
stage_out_channels[-1], stage_out_channels[-1],
class_dim, class_dim,
param_attr=ParamAttr(name='fc6_weights'), weight_attr=ParamAttr(name='fc6_weights'),
bias_attr=ParamAttr(name='fc6_offset')) bias_attr=ParamAttr(name='fc6_offset'))
def forward(self, inputs): def forward(self, inputs):
...@@ -314,7 +307,7 @@ class ShuffleNet(fluid.dygraph.Layer): ...@@ -314,7 +307,7 @@ class ShuffleNet(fluid.dygraph.Layer):
y = inv(y) y = inv(y)
y = self._last_conv(y) y = self._last_conv(y)
y = self._pool2d_avg(y) y = self._pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self._out_c]) y = paddle.reshape(y, shape=[-1, self._out_c])
y = self._fc(y) y = self._fc(y)
return y return y
......
此差异已折叠。
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout import paddle.nn.functional as F
from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
__all__ = ["Xception41_deeplab", "Xception65_deeplab", "Xception71_deeplab"] __all__ = ["Xception41_deeplab", "Xception65_deeplab", "Xception71_deeplab"]
...@@ -56,7 +58,7 @@ def gen_bottleneck_params(backbone='xception_65'): ...@@ -56,7 +58,7 @@ def gen_bottleneck_params(backbone='xception_65'):
return bottleneck_params return bottleneck_params
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -67,13 +69,13 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -67,13 +69,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvBNLayer, self).__init__() super(ConvBNLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=input_channels, in_channels=input_channels,
num_filters=output_channels, out_channels=output_channels,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=padding, padding=padding,
param_attr=ParamAttr(name=name + "/weights"), weight_attr=ParamAttr(name=name + "/weights"),
bias_attr=False) bias_attr=False)
self._bn = BatchNorm( self._bn = BatchNorm(
num_channels=output_channels, num_channels=output_channels,
...@@ -89,7 +91,7 @@ class ConvBNLayer(fluid.dygraph.Layer): ...@@ -89,7 +91,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return self._bn(self._conv(inputs)) return self._bn(self._conv(inputs))
class Seperate_Conv(fluid.dygraph.Layer): class Seperate_Conv(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -100,15 +102,15 @@ class Seperate_Conv(fluid.dygraph.Layer): ...@@ -100,15 +102,15 @@ class Seperate_Conv(fluid.dygraph.Layer):
name=None): name=None):
super(Seperate_Conv, self).__init__() super(Seperate_Conv, self).__init__()
self._conv1 = Conv2D( self._conv1 = Conv2d(
num_channels=input_channels, in_channels=input_channels,
num_filters=input_channels, out_channels=input_channels,
filter_size=filter, kernel_size=filter,
stride=stride, stride=stride,
groups=input_channels, groups=input_channels,
padding=(filter) // 2 * dilation, padding=(filter) // 2 * dilation,
dilation=dilation, dilation=dilation,
param_attr=ParamAttr(name=name + "/depthwise/weights"), weight_attr=ParamAttr(name=name + "/depthwise/weights"),
bias_attr=False) bias_attr=False)
self._bn1 = BatchNorm( self._bn1 = BatchNorm(
input_channels, input_channels,
...@@ -119,14 +121,14 @@ class Seperate_Conv(fluid.dygraph.Layer): ...@@ -119,14 +121,14 @@ class Seperate_Conv(fluid.dygraph.Layer):
bias_attr=ParamAttr(name=name + "/depthwise/BatchNorm/beta"), bias_attr=ParamAttr(name=name + "/depthwise/BatchNorm/beta"),
moving_mean_name=name + "/depthwise/BatchNorm/moving_mean", moving_mean_name=name + "/depthwise/BatchNorm/moving_mean",
moving_variance_name=name + "/depthwise/BatchNorm/moving_variance") moving_variance_name=name + "/depthwise/BatchNorm/moving_variance")
self._conv2 = Conv2D( self._conv2 = Conv2d(
input_channels, input_channels,
output_channels, output_channels,
1, 1,
stride=1, stride=1,
groups=1, groups=1,
padding=0, padding=0,
param_attr=ParamAttr(name=name + "/pointwise/weights"), weight_attr=ParamAttr(name=name + "/pointwise/weights"),
bias_attr=False) bias_attr=False)
self._bn2 = BatchNorm( self._bn2 = BatchNorm(
output_channels, output_channels,
...@@ -146,7 +148,7 @@ class Seperate_Conv(fluid.dygraph.Layer): ...@@ -146,7 +148,7 @@ class Seperate_Conv(fluid.dygraph.Layer):
return x return x
class Xception_Block(fluid.dygraph.Layer): class Xception_Block(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -226,11 +228,11 @@ class Xception_Block(fluid.dygraph.Layer): ...@@ -226,11 +228,11 @@ class Xception_Block(fluid.dygraph.Layer):
def forward(self, inputs): def forward(self, inputs):
if not self.activation_fn_in_separable_conv: if not self.activation_fn_in_separable_conv:
x = fluid.layers.relu(inputs) x = F.relu(inputs)
x = self._conv1(x) x = self._conv1(x)
x = fluid.layers.relu(x) x = F.relu(x)
x = self._conv2(x) x = self._conv2(x)
x = fluid.layers.relu(x) x = F.relu(x)
x = self._conv3(x) x = self._conv3(x)
else: else:
x = self._conv1(inputs) x = self._conv1(inputs)
...@@ -242,10 +244,10 @@ class Xception_Block(fluid.dygraph.Layer): ...@@ -242,10 +244,10 @@ class Xception_Block(fluid.dygraph.Layer):
skip = self._short(inputs) skip = self._short(inputs)
else: else:
skip = inputs skip = inputs
return fluid.layers.elementwise_add(x, skip) return paddle.elementwise_add(x, skip)
class XceptionDeeplab(fluid.dygraph.Layer): class XceptionDeeplab(nn.Layer):
def __init__(self, backbone, class_dim=1000): def __init__(self, backbone, class_dim=1000):
super(XceptionDeeplab, self).__init__() super(XceptionDeeplab, self).__init__()
...@@ -344,12 +346,12 @@ class XceptionDeeplab(fluid.dygraph.Layer): ...@@ -344,12 +346,12 @@ class XceptionDeeplab(fluid.dygraph.Layer):
self.stride = s self.stride = s
self._drop = Dropout(p=0.5) self._drop = Dropout(p=0.5, mode="downscale_in_infer")
self._pool = Pool2D(pool_type="avg", global_pooling=True) self._pool = AdaptiveAvgPool2d(1)
self._fc = Linear( self._fc = Linear(
self.chns[1][-1], self.chns[1][-1],
class_dim, class_dim,
param_attr=ParamAttr(name="fc_weights"), weight_attr=ParamAttr(name="fc_weights"),
bias_attr=ParamAttr(name="fc_bias")) bias_attr=ParamAttr(name="fc_bias"))
def forward(self, inputs): def forward(self, inputs):
...@@ -363,7 +365,7 @@ class XceptionDeeplab(fluid.dygraph.Layer): ...@@ -363,7 +365,7 @@ class XceptionDeeplab(fluid.dygraph.Layer):
x = self._exit_flow_2(x) x = self._exit_flow_2(x)
x = self._drop(x) x = self._drop(x)
x = self._pool(x) x = self._pool(x)
x = fluid.layers.squeeze(x, axes=[2, 3]) x = paddle.squeeze(x, axis=[2, 3])
x = self._fc(x) x = self._fc(x)
return x return x
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
...@@ -23,7 +23,7 @@ logging.basicConfig( ...@@ -23,7 +23,7 @@ logging.basicConfig(
def time_zone(sec, fmt): def time_zone(sec, fmt):
real_time = datetime.datetime.now() + datetime.timedelta(hours=8) real_time = datetime.datetime.now()
return real_time.timetuple() return real_time.timetuple()
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册