Unverified Commit be35b7cc authored by littletomatodonkey, committed by GitHub

Merge pull request #182 from littletomatodonkey/dyg_model

add dpn, densenet and hrnet dygraph model
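For reviewers, a minimal smoke-test sketch (not part of the diff; it assumes the DenseNet class added below is importable and that the Paddle 1.x dygraph APIs are available):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    # DenseNet is the dygraph class defined in the diff below
    model = DenseNet(layers=121, class_dim=1000)
    x = fluid.dygraph.to_variable(
        np.random.rand(1, 3, 224, 224).astype("float32"))
    y = model(x)
    print(y.shape)  # expected: [1, 1000]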
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout

import math

__all__ = [
    "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201", "DenseNet264"
]
class BNACConvLayer(fluid.dygraph.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 pad=0,
                 groups=1,
                 act="relu",
                 name=None):
super(BNACConvLayer, self).__init__()
self._batch_norm = BatchNorm(
num_channels,
act=act,
param_attr=ParamAttr(name=name + '_bn_scale'),
bias_attr=ParamAttr(name + '_bn_offset'),
moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance')
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=pad,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
def forward(self, input):
y = self._batch_norm(input)
y = self._conv(y)
return y
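# BNACConvLayer applies BatchNorm -> activation -> Conv2D, the pre-activation
# ordering used by the dense and dual-path blocks below; ConvBNLayer, defined
# further down, applies Conv2D -> BatchNorm instead.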
class DenseLayer(fluid.dygraph.Layer):
def __init__(self, num_channels, growth_rate, bn_size, dropout, name=None):
super(DenseLayer, self).__init__()
self.dropout = dropout
self.bn_ac_func1 = BNACConvLayer(
num_channels=num_channels,
num_filters=bn_size * growth_rate,
filter_size=1,
pad=0,
stride=1,
name=name + "_x1")
self.bn_ac_func2 = BNACConvLayer(
num_channels=bn_size * growth_rate,
num_filters=growth_rate,
filter_size=3,
pad=1,
stride=1,
name=name + "_x2")
if dropout:
self.dropout_func = Dropout(p=dropout)
def forward(self, input):
conv = self.bn_ac_func1(input)
conv = self.bn_ac_func2(conv)
if self.dropout:
conv = self.dropout_func(conv)
conv = fluid.layers.concat([input, conv], axis=1)
return conv
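# Illustrative shape check (values not from the diff): with bn_size=4 and
# growth_rate=32, an input with C channels is expanded to 4 * 32 = 128
# channels by the 1x1 stage, reduced to 32 by the 3x3 stage, and the concat
# with the input then yields C + 32 channels, so every DenseLayer adds
# exactly growth_rate channels.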
class DenseBlock(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_layers,
bn_size,
growth_rate,
dropout,
name=None):
super(DenseBlock, self).__init__()
self.dropout = dropout
self.dense_layer_func = []
pre_channel = num_channels
for layer in range(num_layers):
self.dense_layer_func.append(
self.add_sublayer(
"{}_{}".format(name, layer + 1),
DenseLayer(
num_channels=pre_channel,
growth_rate=growth_rate,
bn_size=bn_size,
dropout=dropout,
name=name + '_' + str(layer + 1))))
pre_channel = pre_channel + growth_rate
def forward(self, input):
conv = input
for func in self.dense_layer_func:
conv = func(conv)
return conv
class TransitionLayer(fluid.dygraph.Layer):
def __init__(self, num_channels, num_output_features, name=None):
super(TransitionLayer, self).__init__()
self.conv_ac_func = BNACConvLayer(
num_channels=num_channels,
num_filters=num_output_features,
filter_size=1,
pad=0,
stride=1,
name=name)
self.pool2d_avg = Pool2D(pool_size=2, pool_stride=2, pool_type='avg')
def forward(self, input):
y = self.conv_ac_func(input)
y = self.pool2d_avg(y)
return y
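# Worked channel arithmetic for the 121-layer spec (64, 32, [6, 12, 24, 16])
# used below: 64 -> 64 + 6*32 = 256 -> 128 -> 128 + 12*32 = 512 -> 256 ->
# 256 + 24*32 = 1024 -> 512 -> 512 + 16*32 = 1024 channels entering the
# final BatchNorm; each TransitionLayer halves both the channel count and
# the spatial resolution.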
class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
pad=0,
groups=1,
act="relu",
name=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=pad,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
self._batch_norm = BatchNorm(
num_filters,
act=act,
param_attr=ParamAttr(name=name + '_bn_scale'),
bias_attr=ParamAttr(name + '_bn_offset'),
moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance')
def forward(self, input):
y = self._conv(input)
y = self._batch_norm(y)
return y
class DenseNet(fluid.dygraph.Layer):
    def __init__(self, layers=60, bn_size=4, dropout=0, class_dim=1000):
        super(DenseNet, self).__init__()

        supported_layers = [121, 161, 169, 201, 264]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        densenet_spec = {
            121: (64, 32, [6, 12, 24, 16]),
            161: (96, 48, [6, 12, 36, 24]),
@@ -44,139 +186,86 @@
            201: (64, 32, [6, 12, 48, 32]),
            264: (64, 32, [6, 12, 64, 48])
        }
        num_init_features, growth_rate, block_config = densenet_spec[layers]

        self.conv1_func = ConvBNLayer(
            num_channels=3,
            num_filters=num_init_features,
            filter_size=7,
            stride=2,
            pad=3,
            act='relu',
            name="conv1")

        self.pool2d_max = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        self.block_config = block_config

        self.dense_block_func_list = []
        self.transition_func_list = []
        pre_num_channels = num_init_features
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.dense_block_func_list.append(
                self.add_sublayer(
                    "db_conv_{}".format(i + 2),
                    DenseBlock(
                        num_channels=pre_num_channels,
                        num_layers=num_layers,
                        bn_size=bn_size,
                        growth_rate=growth_rate,
                        dropout=dropout,
                        name='conv' + str(i + 2))))
            num_features = num_features + num_layers * growth_rate
            pre_num_channels = num_features

            if i != len(block_config) - 1:
                self.transition_func_list.append(
                    self.add_sublayer(
                        "tr_conv{}_blk".format(i + 2),
                        TransitionLayer(
                            num_channels=pre_num_channels,
                            num_output_features=num_features // 2,
                            name='conv' + str(i + 2) + "_blk")))
                pre_num_channels = num_features // 2
                num_features = num_features // 2

        self.batch_norm = BatchNorm(
            num_features,
            act="relu",
            param_attr=ParamAttr(name='conv5_blk_bn_scale'),
            bias_attr=ParamAttr(name='conv5_blk_bn_offset'),
            moving_mean_name='conv5_blk_bn_mean',
            moving_variance_name='conv5_blk_bn_variance')

        self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)

        stdv = 1.0 / math.sqrt(num_features * 1.0)

        self.out = Linear(
            num_features,
            class_dim,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name="fc_weights"),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, input):
        conv = self.conv1_func(input)
        conv = self.pool2d_max(conv)

        for i, num_layers in enumerate(self.block_config):
            conv = self.dense_block_func_list[i](conv)
            if i != len(self.block_config) - 1:
                conv = self.transition_func_list[i](conv)

        conv = self.batch_norm(conv)
        y = self.pool2d_avg(conv)
        y = fluid.layers.reshape(y, shape=[0, -1])
        y = self.out(y)
        return y
def DenseNet121():
......
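The factory bodies are folded in this view. Judging from the DPN68 factory shown at the end of the diff, they presumably follow this shape (a sketch, not the diff's actual text):

def DenseNet121():
    model = DenseNet(layers=121)
    return model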
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear

import math
__all__ = [
"DPN",
"DPN68",
"DPN92",
"DPN98",
"DPN107",
"DPN131",
]
class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
pad=0,
groups=1,
act="relu",
name=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=pad,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
self._batch_norm = BatchNorm(
num_filters,
act=act,
param_attr=ParamAttr(name=name + '_bn_scale'),
bias_attr=ParamAttr(name + '_bn_offset'),
moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance')
def forward(self, input):
y = self._conv(input)
y = self._batch_norm(y)
return y
class BNACConvLayer(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
pad=0,
groups=1,
act="relu",
name=None):
super(BNACConvLayer, self).__init__()
self.num_channels = num_channels
self.name = name
self._batch_norm = BatchNorm(
num_channels,
act=act,
param_attr=ParamAttr(name=name + '_bn_scale'),
bias_attr=ParamAttr(name + '_bn_offset'),
moving_mean_name=name + '_bn_mean',
moving_variance_name=name + '_bn_variance')
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=pad,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
def forward(self, input):
y = self._batch_norm(input)
y = self._conv(y)
return y
class DualPathFactory(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_1x1_a,
num_3x3_b,
num_1x1_c,
inc,
G,
_type='normal',
name=None):
super(DualPathFactory, self).__init__()
self.num_1x1_c = num_1x1_c
self.inc = inc
self.name = name
kw = 3
kh = 3
pw = (kw - 1) // 2
ph = (kh - 1) // 2
# type
if _type == 'proj':
key_stride = 1
self.has_proj = True
elif _type == 'down':
key_stride = 2
self.has_proj = True
elif _type == 'normal':
key_stride = 1
self.has_proj = False
else:
print("not implemented now!!!")
sys.exit(1)
data_in_ch = sum(num_channels) if isinstance(num_channels,
list) else num_channels
if self.has_proj:
self.c1x1_w_func = BNACConvLayer(
num_channels=data_in_ch,
num_filters=num_1x1_c + 2 * inc,
filter_size=(1, 1),
pad=(0, 0),
stride=(key_stride, key_stride),
name=name + "_match")
self.c1x1_a_func = BNACConvLayer(
num_channels=data_in_ch,
num_filters=num_1x1_a,
filter_size=(1, 1),
pad=(0, 0),
name=name + "_conv1")
self.c3x3_b_func = BNACConvLayer(
num_channels=num_1x1_a,
num_filters=num_3x3_b,
filter_size=(kw, kh),
pad=(pw, ph),
stride=(key_stride, key_stride),
groups=G,
name=name + "_conv2")
self.c1x1_c_func = BNACConvLayer(
num_channels=num_3x3_b,
num_filters=num_1x1_c + inc,
filter_size=(1, 1),
pad=(0, 0),
name=name + "_conv3")
def forward(self, input):
# PROJ
if isinstance(input, list):
data_in = fluid.layers.concat([input[0], input[1]], axis=1)
else:
data_in = input
if self.has_proj:
c1x1_w = self.c1x1_w_func(data_in)
data_o1, data_o2 = fluid.layers.split(
c1x1_w, num_or_sections=[self.num_1x1_c, 2 * self.inc], dim=1)
else:
data_o1 = input[0]
data_o2 = input[1]
c1x1_a = self.c1x1_a_func(data_in)
c3x3_b = self.c3x3_b_func(c1x1_a)
c1x1_c = self.c1x1_c_func(c3x3_b)
c1x1_c1, c1x1_c2 = fluid.layers.split(
c1x1_c, num_or_sections=[self.num_1x1_c, self.inc], dim=1)
# OUTPUTS
summ = fluid.layers.elementwise_add(x=data_o1, y=c1x1_c1)
dense = fluid.layers.concat([data_o2, c1x1_c2], axis=1)
# tensor, channels
return [summ, dense]
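# Channel bookkeeping implied by the splits above: the residual path `summ`
# always carries num_1x1_c channels, while the dense path grows by inc
# channels per block (concat of data_o2 and c1x1_c2); a 'proj' or 'down'
# block re-projects the input so data_o2 holds 2 * inc channels, which the
# following concat raises to 3 * inc.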
__all__ = ["DPN", "DPN68", "DPN92", "DPN98", "DPN107", "DPN131"]
class DPN(fluid.dygraph.Layer):
    def __init__(self, layers=60, class_dim=1000):
        super(DPN, self).__init__()

        self._class_dim = class_dim

        args = self.get_net_args(layers)
        bws = args['bw']
        inc_sec = args['inc_sec']
        rs = args['r']
@@ -45,39 +209,23 @@
        init_filter_size = args['init_filter_size']
        init_padding = args['init_padding']

        self.k_sec = k_sec

        self.conv1_x_1_func = ConvBNLayer(
            num_channels=3,
            num_filters=init_num_filter,
            filter_size=3,
            stride=2,
            pad=1,
            act='relu',
            name="conv1")

        self.pool2d_max = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

        num_channel_dpn = init_num_filter

        self.dpn_func_list = []
        #conv2 - conv5
        match_list, num = [], 0
        for gc in range(4):
@@ -93,43 +241,82 @@
            _type2 = 'normal'
            match = match + k_sec[gc - 1]
            match_list.append(match)
            self.dpn_func_list.append(
                self.add_sublayer(
                    "dpn{}".format(match),
                    DualPathFactory(
                        num_channels=num_channel_dpn,
                        num_1x1_a=R,
                        num_3x3_b=R,
                        num_1x1_c=bw,
                        inc=inc,
                        G=G,
                        _type=_type1,
                        name="dpn" + str(match))))
            num_channel_dpn = [bw, 3 * inc]

            for i_ly in range(2, k_sec[gc] + 1):
                num += 1
                if num in match_list:
                    num += 1
                self.dpn_func_list.append(
                    self.add_sublayer(
                        "dpn{}".format(num),
                        DualPathFactory(
                            num_channels=num_channel_dpn,
                            num_1x1_a=R,
                            num_3x3_b=R,
                            num_1x1_c=bw,
                            inc=inc,
                            G=G,
                            _type=_type2,
                            name="dpn" + str(num))))
                num_channel_dpn = [
                    num_channel_dpn[0], num_channel_dpn[1] + inc
                ]

        out_channel = sum(num_channel_dpn)

        self.conv5_x_x_bn = BatchNorm(
            num_channels=sum(num_channel_dpn),
            act="relu",
            param_attr=ParamAttr(name='final_concat_bn_scale'),
            bias_attr=ParamAttr('final_concat_bn_offset'),
            moving_mean_name='final_concat_bn_mean',
            moving_variance_name='final_concat_bn_variance')

        self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)

        stdv = 0.01

        self.out = Linear(
            out_channel,
            class_dim,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name="fc_weights"),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, input):
        conv1_x_1 = self.conv1_x_1_func(input)
        convX_x_x = self.pool2d_max(conv1_x_1)

        dpn_idx = 0
        for gc in range(4):
            convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x)
            dpn_idx += 1
            for i_ly in range(2, self.k_sec[gc] + 1):
                convX_x_x = self.dpn_func_list[dpn_idx](convX_x_x)
                dpn_idx += 1

        conv5_x_x = fluid.layers.concat(convX_x_x, axis=1)
        conv5_x_x = self.conv5_x_x_bn(conv5_x_x)

        y = self.pool2d_avg(conv5_x_x)
        y = fluid.layers.reshape(y, shape=[0, -1])
        y = self.out(y)
        return y
    def get_net_args(self, layers):
        if layers == 68:
@@ -198,119 +385,6 @@
        return net_arg
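    # Note (gloss, not in the diff): get_net_args returns a dict of per-depth
    # hyperparameters. The keys consumed in __init__ above are 'bw',
    # 'inc_sec', 'r', 'init_filter_size' and 'init_padding'; the folded hunk
    # presumably also reads the initial filter count, the grouped-conv
    # cardinality G and the stage depths k_sec.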
def DPN68():
    model = DPN(layers=68)
......
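A matching smoke test for the DPN variants (again a sketch; only the class itself is shown in the diff, so the construction call is the safest entry point):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    # DPN68() presumably wraps exactly this call and returns the model
    model = DPN(layers=68)
    x = fluid.dygraph.to_variable(
        np.random.rand(1, 3, 224, 224).astype("float32"))
    y = model(x)
    print(y.shape)  # expected: [1, 1000]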