Commit 94a8f50a authored by littletomatodonkey

fix effnet and darknet

Parent 7bcdf7ad
@@ -23,7 +23,7 @@ from .se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd, SE_Res
 from .se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet154_vd
 from .dpn import DPN68
 from .densenet import DenseNet121
-from .hrnet import HRNet_W18_C
+from .hrnet import HRNet_W18_C, HRNet_W30_C, HRNet_W32_C, HRNet_W40_C, HRNet_W44_C, HRNet_W48_C, HRNet_W60_C, HRNet_W64_C, SE_HRNet_W18_C, SE_HRNet_W30_C, SE_HRNet_W32_C, SE_HRNet_W40_C, SE_HRNet_W44_C, SE_HRNet_W48_C, SE_HRNet_W60_C, SE_HRNet_W64_C
 from .efficientnet import EfficientNetB0
 from .resnest import ResNeSt50_fast_1s1x64d, ResNeSt50
 from .googlenet import GoogLeNet
@@ -39,5 +39,6 @@ from .resnext101_wsl import ResNeXt101_32x8d_wsl, ResNeXt101_32x16d_wsl, ResNeXt
 from .shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish
 from .squeezenet import SqueezeNet1_0, SqueezeNet1_1
 from .vgg import VGG11, VGG13, VGG16, VGG19
+from .darknet import DarkNet53
 from .distillation_models import ResNet50_vd_distill_MobileNetV3_large_x1_0
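The hunks above widen the HRNet exports and register the new DarkNet53 entry point; the next block of changes rewrites the DarkNet53 definition itself from `paddle.fluid` dygraph to the `paddle.nn` API. A minimal usage sketch of the newly exported models (the package path is an assumption inferred from the relative imports; it does not appear in this diff):

```python
# Hypothetical usage sketch; the package path `ppcls.modeling.architectures`
# is an assumption based on the relative imports above, not shown in the diff.
from ppcls.modeling.architectures import DarkNet53, HRNet_W48_C

model = DarkNet53(class_dim=1000)  # class_dim matches the constructor shown below
hrnet = HRNet_W48_C()              # one of the HRNet variants newly re-exported here
```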
 import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
+from paddle.nn.initializer import Uniform
 import math

 __all__ = ["DarkNet53"]

-class ConvBNLayer(fluid.dygraph.Layer):
+class ConvBNLayer(nn.Layer):
     def __init__(self,
                  input_channels,
                  output_channels,
@@ -17,14 +20,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
                  name=None):
         super(ConvBNLayer, self).__init__()

-        self._conv = Conv2D(
-            num_channels=input_channels,
-            num_filters=output_channels,
-            filter_size=filter_size,
+        self._conv = Conv2d(
+            in_channels=input_channels,
+            out_channels=output_channels,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
-            act=None,
-            param_attr=ParamAttr(name=name + ".conv.weights"),
+            weight_attr=ParamAttr(name=name + ".conv.weights"),
             bias_attr=False)

         bn_name = name + ".bn"
@@ -42,7 +44,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
         return x

-class BasicBlock(fluid.dygraph.Layer):
+class BasicBlock(nn.Layer):
     def __init__(self, input_channels, output_channels, name=None):
         super(BasicBlock, self).__init__()
@@ -54,10 +56,10 @@ class BasicBlock(fluid.dygraph.Layer):
     def forward(self, inputs):
         x = self._conv1(inputs)
         x = self._conv2(x)
-        return fluid.layers.elementwise_add(x=inputs, y=x)
+        return paddle.elementwise_add(x=inputs, y=x)

-class DarkNet(fluid.dygraph.Layer):
+class DarkNet(nn.Layer):
     def __init__(self, class_dim=1000):
         super(DarkNet, self).__init__()
@@ -102,15 +104,14 @@ class DarkNet(fluid.dygraph.Layer):
         self._basic_block_43 = BasicBlock(1024, 512, name="stage.4.2")
         self._basic_block_44 = BasicBlock(1024, 512, name="stage.4.3")

-        self._pool = Pool2D(pool_type="avg", global_pooling=True)
+        self._pool = AdaptiveAvgPool2d(1)

         stdv = 1.0 / math.sqrt(1024.0)
         self._out = Linear(
-            input_dim=1024,
-            output_dim=class_dim,
-            param_attr=ParamAttr(
-                name="fc_weights",
-                initializer=fluid.initializer.Uniform(-stdv, stdv)),
+            1024,
+            class_dim,
+            weight_attr=ParamAttr(
+                name="fc_weights", initializer=Uniform(-stdv, stdv)),
             bias_attr=ParamAttr(name="fc_offset"))

     def forward(self, inputs):
@@ -150,7 +151,7 @@ class DarkNet(fluid.dygraph.Layer):
         x = self._basic_block_44(x)

         x = self._pool(x)
-        x = fluid.layers.squeeze(x, axes=[2, 3])
+        x = paddle.squeeze(x, axis=[2, 3])
         x = self._out(x)
         return x
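For reference, the classification-head pattern this file moves to (AdaptiveAvgPool2d(1) in place of Pool2D global pooling, paddle.squeeze before the Linear layer) can be condensed as below. This is a minimal sketch using the API spellings imported in this commit (the Paddle 2.0-beta-era `Conv2d`/`AdaptiveAvgPool2d` names), not the full DarkNet53 definition; the channel and batch sizes are illustrative. The EfficientNet definition that follows receives the same treatment.

```python
import math
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn.initializer import Uniform

class Head(nn.Layer):
    """Sketch of the migrated head: global average pool -> squeeze -> Linear."""
    def __init__(self, in_channels=1024, class_dim=1000):
        super(Head, self).__init__()
        self._pool = nn.AdaptiveAvgPool2d(1)          # replaces Pool2D(global_pooling=True)
        stdv = 1.0 / math.sqrt(in_channels)
        self._out = nn.Linear(
            in_channels,
            class_dim,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))

    def forward(self, x):
        x = self._pool(x)                             # [N, C, H, W] -> [N, C, 1, 1]
        x = paddle.squeeze(x, axis=[2, 3])            # [N, C, 1, 1] -> [N, C]
        return self._out(x)

feat = paddle.rand([4, 1024, 7, 7])                   # illustrative feature map
logits = Head()(feat)                                  # shape [4, 1000]
```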
 import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Conv2d, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d
 import math
 import collections
 import re
@@ -242,15 +244,14 @@ def _drop_connect(inputs, prob, is_test):
     if is_test:
         return inputs
     keep_prob = 1.0 - prob
-    inputs_shape = fluid.layers.shape(inputs)
-    random_tensor = keep_prob + fluid.layers.uniform_random(
-        shape=[inputs_shape[0], 1, 1, 1], min=0., max=1.)
-    binary_tensor = fluid.layers.floor(random_tensor)
+    inputs_shape = paddle.shape(inputs)
+    random_tensor = keep_prob + paddle.rand(shape=[inputs_shape[0], 1, 1, 1])
+    binary_tensor = paddle.floor(random_tensor)
     output = inputs / keep_prob * binary_tensor
     return output
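Pulling the new-side lines of the hunk above together, the migrated drop-connect helper reads as follows (reassembled from the diff for readability; the behaviour is unchanged: during training each sample survives with probability `keep_prob` and is rescaled to keep the expected activation constant).

```python
def _drop_connect(inputs, prob, is_test):
    # Drop connect / stochastic depth: randomly zero whole samples during training.
    if is_test:
        return inputs
    keep_prob = 1.0 - prob
    inputs_shape = paddle.shape(inputs)
    # One Bernoulli(keep_prob) draw per sample, broadcast over C, H, W.
    random_tensor = keep_prob + paddle.rand(shape=[inputs_shape[0], 1, 1, 1])
    binary_tensor = paddle.floor(random_tensor)
    output = inputs / keep_prob * binary_tensor
    return output
```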
-class Conv2ds(fluid.dygraph.Layer):
+class Conv2ds(nn.Layer):
     def __init__(self,
                  input_channels,
                  output_channels,
@@ -265,6 +266,8 @@ class Conv2ds(fluid.dygraph.Layer):
                  model_name=None,
                  cur_stage=None):
         super(Conv2ds, self).__init__()
+        assert act in [None, "swish", "sigmoid"]
+        self.act = act

         param_attr, bias_attr = initial_type(name=name, use_bias=use_bias)
@@ -296,25 +299,31 @@ class Conv2ds(fluid.dygraph.Layer):
         else:
             padding = padding_type

-        self._conv = Conv2D(
+        groups = 1 if groups is None else groups
+        self._conv = Conv2d(
             input_channels,
             output_channels,
             filter_size,
             groups=groups,
             stride=stride,
-            act=act,
+            # act=act,
             padding=padding,
-            param_attr=param_attr,
+            weight_attr=param_attr,
             bias_attr=bias_attr)

     def forward(self, inputs):
         x = self._conv(inputs)
+        if self.act == "swish":
+            x = F.swish(x)
+        elif self.act == "sigmoid":
+            x = F.sigmoid(x)
+
         if self.need_crop:
             x = x[:, :, 1:, 1:]
         return x
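Since `paddle.nn.Conv2d` no longer accepts an `act` argument, the activation moves out of the layer constructor and into `forward()`, which is what the Conv2ds hunk above does. A self-contained sketch of that pattern (the `ConvAct` name and the shapes are illustrative, not part of the commit):

```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class ConvAct(nn.Layer):
    """Sketch: convolution followed by an explicit activation in forward()."""
    def __init__(self, in_c, out_c, k, act=None):
        super(ConvAct, self).__init__()
        assert act in [None, "swish", "sigmoid"]
        self.act = act
        self._conv = nn.Conv2d(in_c, out_c, k, padding=k // 2)

    def forward(self, x):
        x = self._conv(x)
        if self.act == "swish":
            x = F.swish(x)
        elif self.act == "sigmoid":
            x = F.sigmoid(x)
        return x

y = ConvAct(16, 32, 3, act="swish")(paddle.rand([1, 16, 8, 8]))
```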
-class ConvBNLayer(fluid.dygraph.Layer):
+class ConvBNLayer(nn.Layer):
     def __init__(self,
                  input_channels,
                  filter_size,
@@ -369,7 +378,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
         return self._conv(inputs)

-class ExpandConvNorm(fluid.dygraph.Layer):
+class ExpandConvNorm(nn.Layer):
     def __init__(self,
                  input_channels,
                  block_args,
@@ -402,7 +411,7 @@ class ExpandConvNorm(fluid.dygraph.Layer):
         return inputs

-class DepthwiseConvNorm(fluid.dygraph.Layer):
+class DepthwiseConvNorm(nn.Layer):
     def __init__(self,
                  input_channels,
                  block_args,
@@ -436,7 +445,7 @@ class DepthwiseConvNorm(fluid.dygraph.Layer):
         return self._conv(inputs)

-class ProjectConvNorm(fluid.dygraph.Layer):
+class ProjectConvNorm(nn.Layer):
     def __init__(self,
                  input_channels,
                  block_args,
@@ -464,7 +473,7 @@ class ProjectConvNorm(fluid.dygraph.Layer):
         return self._conv(inputs)

-class SEBlock(fluid.dygraph.Layer):
+class SEBlock(nn.Layer):
     def __init__(self,
                  input_channels,
                  num_squeezed_channels,
@@ -475,8 +484,7 @@ class SEBlock(fluid.dygraph.Layer):
                  cur_stage=None):
         super(SEBlock, self).__init__()

-        self._pool = Pool2D(
-            pool_type="avg", global_pooling=True, use_cudnn=False)
+        self._pool = AdaptiveAvgPool2d(1)
         self._conv1 = Conv2ds(
             input_channels,
             num_squeezed_channels,
@@ -499,10 +507,10 @@ class SEBlock(fluid.dygraph.Layer):
         x = self._pool(inputs)
         x = self._conv1(x)
         x = self._conv2(x)
-        return fluid.layers.elementwise_mul(inputs, x)
+        return paddle.multiply(inputs, x)
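The SEBlock changes swap Pool2D global pooling for `AdaptiveAvgPool2d(1)` and `fluid.layers.elementwise_mul` for `paddle.multiply`, which broadcasts the `[N, C, 1, 1]` gate over the `[N, C, H, W]` input. A condensed sketch of that squeeze-and-excitation gating (`TinySE` and the channel counts are illustrative, not the commit's actual block):

```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class TinySE(nn.Layer):
    """Sketch of SE gating: global pool, two 1x1 convs, channel-wise rescale."""
    def __init__(self, channels, squeezed):
        super(TinySE, self).__init__()
        self._pool = nn.AdaptiveAvgPool2d(1)
        self._conv1 = nn.Conv2d(channels, squeezed, 1)
        self._conv2 = nn.Conv2d(squeezed, channels, 1)

    def forward(self, inputs):
        x = self._pool(inputs)                # [N, C, H, W] -> [N, C, 1, 1]
        x = F.swish(self._conv1(x))
        x = F.sigmoid(self._conv2(x))
        return paddle.multiply(inputs, x)     # gate broadcasts over H and W

out = TinySE(32, 8)(paddle.rand([2, 32, 16, 16]))
```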
-class MbConvBlock(fluid.dygraph.Layer):
+class MbConvBlock(nn.Layer):
     def __init__(self,
                  input_channels,
                  block_args,
@@ -565,9 +573,9 @@ class MbConvBlock(fluid.dygraph.Layer):
         x = inputs
         if self.expand_ratio != 1:
             x = self._ecn(x)
-            x = fluid.layers.swish(x)
+            x = F.swish(x)
         x = self._dcn(x)
-        x = fluid.layers.swish(x)
+        x = F.swish(x)
         if self.has_se:
             x = self._se(x)
         x = self._pcn(x)
@@ -576,11 +584,11 @@ class MbConvBlock(fluid.dygraph.Layer):
                 self.block_args.input_filters == self.block_args.output_filters:
             if self.drop_connect_rate:
                 x = _drop_connect(x, self.drop_connect_rate, self.is_test)
-            x = fluid.layers.elementwise_add(x, inputs)
+            x = paddle.elementwise_add(x, inputs)
         return x

-class ConvStemNorm(fluid.dygraph.Layer):
+class ConvStemNorm(nn.Layer):
     def __init__(self,
                  input_channels,
                  padding_type,
@@ -608,7 +616,7 @@ class ConvStemNorm(fluid.dygraph.Layer):
         return self._conv(inputs)

-class ExtractFeatures(fluid.dygraph.Layer):
+class ExtractFeatures(nn.Layer):
     def __init__(self,
                  input_channels,
                  _block_args,
@@ -694,13 +702,13 @@ class ExtractFeatures(fluid.dygraph.Layer):
     def forward(self, inputs):
         x = self._conv_stem(inputs)
-        x = fluid.layers.swish(x)
+        x = F.swish(x)
         for _mc_block in self.conv_seq:
             x = _mc_block(x)
         return x
-class EfficientNet(fluid.dygraph.Layer):
+class EfficientNet(nn.Layer):
     def __init__(self,
                  name="b0",
                  is_test=True,
@@ -753,18 +761,17 @@ class EfficientNet(fluid.dygraph.Layer):
             bn_name="_bn1",
             model_name=self.name,
             cur_stage=7)
-        self._pool = Pool2D(pool_type="avg", global_pooling=True)
+        self._pool = AdaptiveAvgPool2d(1)

         if self._global_params.dropout_rate:
             self._drop = Dropout(
-                p=self._global_params.dropout_rate,
-                dropout_implementation="upscale_in_train")
+                p=self._global_params.dropout_rate, mode="upscale_in_train")

         param_attr, bias_attr = init_fc_layer("_fc")
         self._fc = Linear(
             output_channels,
             class_dim,
-            param_attr=param_attr,
+            weight_attr=param_attr,
             bias_attr=bias_attr)

     def forward(self, inputs):
@@ -773,7 +780,7 @@ class EfficientNet(fluid.dygraph.Layer):
         x = self._pool(x)
         if self._global_params.dropout_rate:
             x = self._drop(x)
-        x = fluid.layers.squeeze(x, axes=[2, 3])
+        x = paddle.squeeze(x, axis=[2, 3])
         x = self._fc(x)
         return x
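Finally, the Dropout construction switches from the fluid `dropout_implementation` keyword to the `mode` argument of `paddle.nn.Dropout`. A small sketch of the new call (the probability and tensor sizes are illustrative):

```python
import paddle
from paddle.nn import Dropout

# "upscale_in_train" rescales activations by 1/(1-p) during training so that
# inference is a pass-through, matching the old dropout_implementation flag.
drop = Dropout(p=0.2, mode="upscale_in_train")

x = paddle.rand([4, 1280])
drop.train()
y_train = drop(x)   # elements randomly zeroed and the rest rescaled
drop.eval()
y_eval = drop(x)    # identical to x at inference time
```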