Commit 6b7b4a7f authored by littletomatodonkey

batch fix pool2d

Parent a0ed3fef
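The change is mechanical across the files below: the 1.x `Pool2D` layer is replaced by dedicated pooling layers, with `pool_size`/`pool_stride`/`pool_padding` mapped to `kernel_size`/`stride`/`padding`, and `pool_type='avg', global_pooling=True` mapped to `AdaptiveAvgPool2d(1)`. The sketch below illustrates that mapping; it assumes a Paddle build that exposes the `MaxPool2d`/`AvgPool2d`/`AdaptiveAvgPool2d` spellings imported in this commit, and the input shape is only illustrative.

```python
# Minimal sketch of the Pool2D -> paddle.nn pooling migration applied in this commit.
import paddle
from paddle.nn import AdaptiveAvgPool2d, AvgPool2d, MaxPool2d

x = paddle.rand([1, 64, 56, 56])  # NCHW, shape chosen only for illustration

# old: Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
max_pool = MaxPool2d(kernel_size=3, stride=2, padding=1)

# old: Pool2D(pool_size=2, pool_stride=2, pool_type='avg')
avg_pool = AvgPool2d(kernel_size=2, stride=2, padding=0)

# old: Pool2D(pool_type='avg', global_pooling=True)
global_avg_pool = AdaptiveAvgPool2d(1)

print(max_pool(x).shape)         # [1, 64, 28, 28]
print(avg_pool(x).shape)         # [1, 64, 28, 28]
print(global_avg_pool(x).shape)  # [1, 64, 1, 1]
```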
@@ -2,7 +2,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout, ReLU
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout, ReLU
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 from paddle.nn.initializer import Uniform
 import math
@@ -35,8 +36,7 @@ class ConvPoolLayer(nn.Layer):
                 name=name + "_weights", initializer=Uniform(-stdv, stdv)),
             bias_attr=ParamAttr(
                 name=name + "_offset", initializer=Uniform(-stdv, stdv)))
-        self._pool = Pool2D(
-            pool_size=3, pool_stride=2, pool_padding=0, pool_type="max")
+        self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)

     def forward(self, inputs):
         x = self._conv(inputs)
...
@@ -20,7 +20,8 @@ import numpy as np
 import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 from paddle.nn.initializer import Uniform
 import math
@@ -144,7 +145,7 @@ class TransitionLayer(nn.Layer):
             stride=1,
             name=name)
-        self.pool2d_avg = Pool2D(pool_size=2, pool_stride=2, pool_type='avg')
+        self.pool2d_avg = AvgPool2d(kernel_size=2, stride=2, padding=0)

     def forward(self, input):
         y = self.conv_ac_func(input)
@@ -213,8 +214,7 @@ class DenseNet(nn.Layer):
             act='relu',
             name="conv1")
-        self.pool2d_max = Pool2D(
-            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
+        self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)

         self.block_config = block_config
@@ -256,7 +256,7 @@ class DenseNet(nn.Layer):
             moving_mean_name='conv5_blk_bn_mean',
             moving_variance_name='conv5_blk_bn_variance')
-        self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)
+        self.pool2d_avg = AdaptiveAvgPool2d(1)

         stdv = 1.0 / math.sqrt(num_features * 1.0)
...
@@ -21,7 +21,8 @@ import sys
 import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear
+from paddle.nn import Conv2d, BatchNorm, Linear
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 from paddle.nn.initializer import Uniform
 import math
@@ -235,8 +236,7 @@ class DPN(nn.Layer):
             act='relu',
             name="conv1")
-        self.pool2d_max = Pool2D(
-            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
+        self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)

         num_channel_dpn = init_num_filter
@@ -301,7 +301,7 @@ class DPN(nn.Layer):
             moving_mean_name='final_concat_bn_mean',
             moving_variance_name='final_concat_bn_variance')
-        self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)
+        self.pool2d_avg = AdaptiveAvgPool2d(1)

         stdv = 0.01
...
@@ -2,7 +2,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 from paddle.nn.initializer import Uniform
 import math
@@ -72,8 +73,8 @@ class Inception(nn.Layer):
             name="inception_" + name + "_5x5_reduce")
         self._conv5 = ConvLayer(
             filter5R, filter5, 5, name="inception_" + name + "_5x5")
-        self._pool = Pool2D(
-            pool_size=3, pool_type="max", pool_stride=1, pool_padding=1)
+        self._pool = MaxPool2d(kernel_size=3, stride=1, padding=1)
         self._convprj = ConvLayer(
             input_channels, proj, 1, name="inception_" + name + "_3x3_proj")
@@ -98,7 +99,7 @@ class GoogleNetDY(nn.Layer):
     def __init__(self, class_dim=1000):
         super(GoogleNetDY, self).__init__()
         self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
-        self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2)
+        self._pool = MaxPool2d(kernel_size=3, stride=2)
         self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1")
         self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3")
@@ -123,7 +124,7 @@ class GoogleNetDY(nn.Layer):
         self._ince5b = Inception(
             832, 832, 384, 192, 384, 48, 128, 128, name="ince5b")
-        self._pool_5 = Pool2D(pool_size=7, pool_type='avg', pool_stride=7)
+        self._pool_5 = AvgPool2d(kernel_size=7, stride=7)

         self._drop = Dropout(p=0.4)
         self._fc_out = Linear(
@@ -131,7 +132,7 @@ class GoogleNetDY(nn.Layer):
             class_dim,
             weight_attr=xavier(1024, 1, "out"),
             bias_attr=ParamAttr(name="out_offset"))
-        self._pool_o1 = Pool2D(pool_size=5, pool_stride=3, pool_type="avg")
+        self._pool_o1 = AvgPool2d(kernel_size=5, stride=3)
         self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
         self._fc_o1 = Linear(
             1152,
@@ -144,7 +145,7 @@ class GoogleNetDY(nn.Layer):
             class_dim,
             weight_attr=xavier(1024, 1, "out1"),
             bias_attr=ParamAttr(name="out1_offset"))
-        self._pool_o2 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg')
+        self._pool_o2 = AvgPool2d(kernel_size=5, stride=3)
         self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
         self._fc_o2 = Linear(
             1152,
...
@@ -21,7 +21,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear
+from paddle.nn import Conv2d, BatchNorm, Linear
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 from paddle.nn.initializer import Uniform
 import math
@@ -310,7 +311,7 @@ class SELayer(nn.Layer):
     def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
         super(SELayer, self).__init__()
-        self.pool2d_gap = Pool2D(pool_type='avg', global_pooling=True)
+        self.pool2d_gap = AdaptiveAvgPool2d(1)

         self._num_channels = num_channels
@@ -622,7 +623,7 @@ class HRNet(nn.Layer):
             stride=1,
             name="cls_head_last_conv")
-        self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)
+        self.pool2d_avg = AdaptiveAvgPool2d(1)

         stdv = 1.0 / math.sqrt(2048 * 1.0)
...
@@ -16,7 +16,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 from paddle.nn.initializer import Uniform
 import math
@@ -67,7 +68,7 @@ class InceptionStem(nn.Layer):
         self._conv_2 = ConvBNLayer(32, 32, 3, act="relu", name="conv2_3x3_s1")
         self._conv_3 = ConvBNLayer(
             32, 64, 3, padding=1, act="relu", name="conv3_3x3_s1")
-        self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2)
+        self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)
         self._conv2 = ConvBNLayer(
             64, 96, 3, stride=2, act="relu", name="inception_stem1_3x3_s2")
         self._conv1_1 = ConvBNLayer(
@@ -122,7 +123,7 @@ class InceptionStem(nn.Layer):
 class InceptionA(nn.Layer):
     def __init__(self, name):
         super(InceptionA, self).__init__()
-        self._pool = Pool2D(pool_size=3, pool_type="avg", pool_padding=1)
+        self._pool = AvgPool2d(kernel_size=3, stride=1, padding=1)
         self._conv1 = ConvBNLayer(
             384, 96, 1, act="relu", name="inception_a" + name + "_1x1")
         self._conv2 = ConvBNLayer(
@@ -177,7 +178,7 @@ class InceptionA(nn.Layer):
 class ReductionA(nn.Layer):
     def __init__(self):
         super(ReductionA, self).__init__()
-        self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2)
+        self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)
         self._conv2 = ConvBNLayer(
             384, 384, 3, stride=2, act="relu", name="reduction_a_3x3")
         self._conv3_1 = ConvBNLayer(
@@ -200,7 +201,7 @@ class ReductionA(nn.Layer):
 class InceptionB(nn.Layer):
     def __init__(self, name=None):
         super(InceptionB, self).__init__()
-        self._pool = Pool2D(pool_size=3, pool_type="avg", pool_padding=1)
+        self._pool = AvgPool2d(kernel_size=3, stride=1, padding=1)
         self._conv1 = ConvBNLayer(
             1024, 128, 1, act="relu", name="inception_b" + name + "_1x1")
         self._conv2 = ConvBNLayer(
@@ -277,7 +278,7 @@ class InceptionB(nn.Layer):
 class ReductionB(nn.Layer):
     def __init__(self):
         super(ReductionB, self).__init__()
-        self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2)
+        self._pool = MaxPool2d(kernel_size=3, stride=2, padding=0)
         self._conv2_1 = ConvBNLayer(
             1024, 192, 1, act="relu", name="reduction_b_3x3_reduce")
         self._conv2_2 = ConvBNLayer(
@@ -318,7 +319,7 @@ class ReductionB(nn.Layer):
 class InceptionC(nn.Layer):
     def __init__(self, name=None):
         super(InceptionC, self).__init__()
-        self._pool = Pool2D(pool_size=3, pool_type="avg", pool_padding=1)
+        self._pool = AvgPool2d(kernel_size=3, stride=1, padding=1)
         self._conv1 = ConvBNLayer(
             1536, 256, 1, act="relu", name="inception_c" + name + "_1x1")
         self._conv2 = ConvBNLayer(
@@ -410,7 +411,7 @@ class InceptionV4DY(nn.Layer):
         self._inceptionC_2 = InceptionC(name="2")
         self._inceptionC_3 = InceptionC(name="3")

-        self.avg_pool = Pool2D(pool_type='avg', global_pooling=True)
+        self.avg_pool = AdaptiveAvgPool2d(1)
         self._drop = Dropout(p=0.2)
         stdv = 1.0 / math.sqrt(1536 * 1.0)
         self.out = Linear(
...
@@ -21,7 +21,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 from paddle.nn.initializer import MSRA
 import math
@@ -226,7 +227,7 @@ class MobileNet(nn.Layer):
                 name="conv6"))
         self.block_list.append(conv6)

-        self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)
+        self.pool2d_avg = AdaptiveAvgPool2d(1)

         self.out = Linear(
             int(1024 * scale),
...
@@ -21,7 +21,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 import math
@@ -198,7 +199,7 @@ class MobileNet(nn.Layer):
             padding=0,
             name="conv9")

-        self.pool2d_avg = Pool2D(pool_type="avg", global_pooling=True)
+        self.pool2d_avg = AdaptiveAvgPool2d(1)

         self.out = Linear(
             self.out_c,
...
@@ -21,7 +21,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d

 # TODO: need to be removed later!
 from paddle.fluid.regularizer import L2Decay
@@ -133,8 +134,7 @@ class MobileNetV3(nn.Layer):
             act="hard_swish",
             name="conv_last")
-        self.pool = Pool2D(
-            pool_type="avg", global_pooling=True, use_cudnn=False)
+        self.pool = AdaptiveAvgPool2d(1)

         self.last_conv = Conv2d(
             in_channels=make_divisible(scale * self.cls_ch_squeeze),
@@ -275,7 +275,7 @@ class ResidualUnit(nn.Layer):
 class SEModule(nn.Layer):
     def __init__(self, channel, reduction=4, name=""):
         super(SEModule, self).__init__()
-        self.avg_pool = Pool2D(pool_type="avg", global_pooling=True)
+        self.avg_pool = AdaptiveAvgPool2d(1)
         self.conv1 = Conv2d(
             in_channels=channel,
             out_channels=channel // reduction,
...
@@ -18,9 +18,12 @@ from __future__ import print_function
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
+from paddle.nn.initializer import Uniform
 import math
@@ -31,7 +34,7 @@ __all__ = [
 ]

-class ConvBNLayer(fluid.dygraph.Layer):
+class ConvBNLayer(nn.Layer):
     def __init__(
             self,
             num_channels,
@@ -43,15 +46,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
             name=None, ):
         super(ConvBNLayer, self).__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = Conv2d(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
-            param_attr=ParamAttr(name=name + "_weights"),
+            weight_attr=ParamAttr(name=name + "_weights"),
             bias_attr=False)
         if name == "conv1":
             bn_name = "bn_" + name
@@ -71,7 +73,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
         return y

-class BottleneckBlock(fluid.dygraph.Layer):
+class BottleneckBlock(nn.Layer):
     def __init__(self,
                  num_channels1,
                  num_channels2,
@@ -102,8 +104,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
                     act='relu',
                     name=name + '_branch2b_' + str(s + 1)))
             self.conv1_list.append(conv1)
-        self.pool2d_avg = Pool2D(
-            pool_size=3, pool_stride=stride, pool_padding=1, pool_type='avg')
+        self.pool2d_avg = AvgPool2d(kernel_size=3, stride=stride, padding=1)

         self.conv2 = ConvBNLayer(
             num_channels=num_filters,
@@ -124,7 +125,7 @@ class BottleneckBlock(fluid.dygraph.Layer):

     def forward(self, inputs):
         y = self.conv0(inputs)
-        xs = fluid.layers.split(y, self.scales, 1)
+        xs = paddle.split(y, self.scales, 1)
         ys = []
         for s, conv1 in enumerate(self.conv1_list):
             if s == 0 or self.stride == 2:
@@ -135,18 +136,18 @@ class BottleneckBlock(fluid.dygraph.Layer):
                 ys.append(xs[-1])
             else:
                 ys.append(self.pool2d_avg(xs[-1]))
-        conv1 = fluid.layers.concat(ys, axis=1)
+        conv1 = paddle.concat(ys, axis=1)
         conv2 = self.conv2(conv1)

         if self.shortcut:
             short = inputs
         else:
             short = self.short(inputs)
-        y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+        y = paddle.elementwise_add(x=short, y=conv2, act='relu')
         return y

-class Res2Net(fluid.dygraph.Layer):
+class Res2Net(nn.Layer):
     def __init__(self, layers=50, scales=4, width=26, class_dim=1000):
         super(Res2Net, self).__init__()
@@ -178,8 +179,7 @@ class Res2Net(fluid.dygraph.Layer):
             stride=2,
             act='relu',
             name="conv1")
-        self.pool2d_max = Pool2D(
-            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
+        self.pool2d_max = MaxPool2d(kernel_size=3, stride=2, padding=1)

         self.block_list = []
         for block in range(len(depth)):
@@ -207,8 +207,7 @@ class Res2Net(fluid.dygraph.Layer):
                 self.block_list.append(bottleneck_block)
                 shortcut = True

-        self.pool2d_avg = Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True)
+        self.pool2d_avg = AdaptiveAvgPool2d(1)

         self.pool2d_avg_channels = num_channels[-1] * 2
@@ -217,9 +216,8 @@ class Res2Net(fluid.dygraph.Layer):
         self.out = Linear(
             self.pool2d_avg_channels,
             class_dim,
-            param_attr=ParamAttr(
-                initializer=fluid.initializer.Uniform(-stdv, stdv),
-                name="fc_weights"),
+            weight_attr=ParamAttr(
+                initializer=Uniform(-stdv, stdv), name="fc_weights"),
             bias_attr=ParamAttr(name="fc_offset"))

     def forward(self, inputs):
@@ -228,7 +226,7 @@ class Res2Net(fluid.dygraph.Layer):
         for block in self.block_list:
             y = block(y)
         y = self.pool2d_avg(y)
-        y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_channels])
+        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
         y = self.out(y)
         return y
...
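The Res2Net diff above also migrates the file from the `fluid` dygraph API to `paddle.nn`. Below is a hedged sketch of that rewrite pattern; `MigratedConvLayer` and its channel sizes are illustrative, not taken from the repository. `Conv2D(num_channels=..., num_filters=..., filter_size=..., param_attr=...)` becomes `Conv2d(in_channels=..., out_channels=..., kernel_size=..., weight_attr=...)`, the `act` argument is dropped in favour of an explicit functional activation, and `fluid.layers.split/concat/reshape` become `paddle.split/concat/reshape`.

```python
# Hedged sketch of the fluid -> paddle.nn rewrite pattern shown in the Res2Net diff.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr


class MigratedConvLayer(nn.Layer):
    def __init__(self, in_ch=16, out_ch=32, k=3, name="demo"):
        super(MigratedConvLayer, self).__init__()
        # old: Conv2D(num_channels=..., num_filters=..., filter_size=..., act=None,
        #             param_attr=ParamAttr(...), bias_attr=False)
        self._conv = nn.Conv2d(
            in_channels=in_ch,
            out_channels=out_ch,
            kernel_size=k,
            padding=(k - 1) // 2,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)

    def forward(self, x):
        # old: fluid.layers.split / fluid.layers.concat -> paddle.split / paddle.concat
        left, right = paddle.split(x, 2, axis=1)
        x = paddle.concat([left, right], axis=1)
        # the activation that was passed as act=... is applied explicitly here
        return F.relu(self._conv(x))


x = paddle.rand([1, 16, 32, 32])     # illustrative input
print(MigratedConvLayer()(x).shape)  # [1, 32, 32, 32]
```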
@@ -2,7 +2,8 @@ import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d

 __all__ = ["Xception41_deeplab", "Xception65_deeplab", "Xception71_deeplab"]
@@ -346,7 +347,7 @@ class XceptionDeeplab(nn.Layer):
         self.stride = s

         self._drop = Dropout(p=0.5)
-        self._pool = Pool2D(pool_type="avg", global_pooling=True)
+        self._pool = AdaptiveAvgPool2d(1)
         self._fc = Linear(
             self.chns[1][-1],
             class_dim,
...