Commit 0a2e0a6d authored by Bai Yifan, committed by whs

Add prefix_name in PaddleDetection (#3556)

Parent 2ee6dd97
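
This commit threads an optional weight_prefix_name argument through YOLOv3Head and the DarkNet, MobileNet, ResNet, ResNeXt, and SENet backbones, stores it as self.prefix_name, and prepends it to every layer and parameter name those modules create. Registering the key in each class's __shared__ list lets a single config value fill the argument everywhere at once, the same way norm_type and num_classes are already shared. The sketch below is illustrative only (the "student_" prefix and the make_names helper are not part of this commit); it shows the naming behavior the diff introduces: the default empty prefix keeps names unchanged, so existing pretrained weights should still load, while a non-empty prefix namespaces every parameter, which allows, for example, two copies of the same architecture to coexist in one program without name collisions.

    # Illustrative sketch of the naming pattern added in this diff.
    # "student_" is a made-up prefix; the base names come from the diff below.
    def make_names(prefix=''):
        bases = ["yolo_input", "stage.0.downsample", "yolo_block.0",
                 "yolo_output.0.conv.weights", "yolo_output.0.conv.bias"]
        return [prefix + b for b in bases]

    print(make_names())            # default '' keeps the original names
    print(make_names("student_"))  # 'student_yolo_input', 'student_yolo_block.0', ...
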
@@ -41,7 +41,7 @@ class YOLOv3Head(object):
         nms (object): an instance of `MultiClassNMS`
     """
     __inject__ = ['nms']
-    __shared__ = ['num_classes']
+    __shared__ = ['num_classes', 'weight_prefix_name']

     def __init__(self,
                  norm_decay=0.,
@@ -56,7 +56,8 @@ class YOLOv3Head(object):
                      nms_top_k=1000,
                      keep_top_k=100,
                      nms_threshold=0.45,
-                     background_label=-1).__dict__):
+                     background_label=-1).__dict__,
+                 weight_prefix_name=''):
         self.norm_decay = norm_decay
         self.num_classes = num_classes
         self.ignore_thresh = ignore_thresh
@@ -64,6 +65,7 @@ class YOLOv3Head(object):
         self.anchor_masks = anchor_masks
         self._parse_anchors(anchors)
         self.nms = nms
+        self.prefix_name = weight_prefix_name
         if isinstance(nms, dict):
             self.nms = MultiClassNMS(**nms)
@@ -208,7 +210,7 @@ class YOLOv3Head(object):
                 block,
                 channel=512 // (2**i),
                 is_test=(not is_train),
-                name="yolo_block.{}".format(i))
+                name=self.prefix_name + "yolo_block.{}".format(i))

            # out channel number = mask_num * (5 + class_num)
            num_filters = len(self.anchor_masks[i]) * (self.num_classes + 5)
@@ -219,11 +221,12 @@ class YOLOv3Head(object):
                stride=1,
                padding=0,
                act=None,
-                param_attr=ParamAttr(
-                    name="yolo_output.{}.conv.weights".format(i)),
+                param_attr=ParamAttr(name=self.prefix_name +
+                                     "yolo_output.{}.conv.weights".format(i)),
                bias_attr=ParamAttr(
                    regularizer=L2Decay(0.),
-                    name="yolo_output.{}.conv.bias".format(i)))
+                    name=self.prefix_name +
+                    "yolo_output.{}.conv.bias".format(i)))
            outputs.append(block_out)

            if i < len(blocks) - 1:
@@ -235,7 +238,7 @@ class YOLOv3Head(object):
                    stride=1,
                    padding=0,
                    is_test=(not is_train),
-                    name="yolo_transition.{}".format(i))
+                    name=self.prefix_name + "yolo_transition.{}".format(i))

                # upsample
                route = self._upsample(route)
@@ -272,7 +275,7 @@ class YOLOv3Head(object):
                ignore_thresh=self.ignore_thresh,
                downsample_ratio=downsample,
                use_label_smooth=self.label_smooth,
-                name="yolo_loss" + str(i))
+                name=self.prefix_name + "yolo_loss" + str(i))
            losses.append(fluid.layers.reduce_mean(loss))
            downsample //= 2
@@ -304,7 +307,7 @@ class YOLOv3Head(object):
                class_num=self.num_classes,
                conf_thresh=self.nms.score_threshold,
                downsample_ratio=downsample,
-                name="yolo_box" + str(i))
+                name=self.prefix_name + "yolo_box" + str(i))
            boxes.append(box)
            scores.append(fluid.layers.transpose(score, perm=[0, 2, 1]))
...
@@ -36,14 +36,19 @@ class DarkNet(object):
         norm_type (str): normalization type, 'bn' and 'sync_bn' are supported
         norm_decay (float): weight decay for normalization layer weights
     """
-    __shared__ = ['norm_type']
+    __shared__ = ['norm_type', 'weight_prefix_name']

-    def __init__(self, depth=53, norm_type='bn', norm_decay=0.):
+    def __init__(self,
+                 depth=53,
+                 norm_type='bn',
+                 norm_decay=0.,
+                 weight_prefix_name=''):
         assert depth in [53], "unsupported depth value"
         self.depth = depth
         self.norm_type = norm_type
         self.norm_decay = norm_decay
         self.depth_cfg = {53: ([1, 2, 8, 8, 4], self.basicblock)}
+        self.prefix_name = weight_prefix_name

     def _conv_norm(self,
                    input,
@@ -143,9 +148,11 @@ class DarkNet(object):
            filter_size=3,
            stride=1,
            padding=1,
-            name="yolo_input")
+            name=self.prefix_name + "yolo_input")
        downsample_ = self._downsample(
-            input=conv, ch_out=conv.shape[1] * 2, name="yolo_input.downsample")
+            input=conv,
+            ch_out=conv.shape[1] * 2,
+            name=self.prefix_name + "yolo_input.downsample")
        blocks = []
        for i, stage in enumerate(stages):
            block = self.layer_warp(
@@ -153,11 +160,11 @@ class DarkNet(object):
                input=downsample_,
                ch_out=32 * 2**i,
                count=stage,
-                name="stage.{}".format(i))
+                name=self.prefix_name + "stage.{}".format(i))
            blocks.append(block)
            if i < len(stages) - 1:  # do not downsaple in the last stage
                downsample_ = self._downsample(
                    input=block,
                    ch_out=block.shape[1] * 2,
-                    name="stage.{}.downsample".format(i))
+                    name=self.prefix_name + "stage.{}.downsample".format(i))
        return blocks
@@ -37,7 +37,7 @@ class MobileNet(object):
         with_extra_blocks (bool): if extra blocks should be added
         extra_block_filters (list): number of filter for each extra block
     """
-    __shared__ = ['norm_type']
+    __shared__ = ['norm_type', 'weight_prefix_name']

     def __init__(self,
                  norm_type='bn',
@@ -46,13 +46,15 @@ class MobileNet(object):
                  conv_learning_rate=1.0,
                  with_extra_blocks=False,
                  extra_block_filters=[[256, 512], [128, 256], [128, 256],
-                                      [64, 128]]):
+                                      [64, 128]],
+                 weight_prefix_name=''):
         self.norm_type = norm_type
         self.norm_decay = norm_decay
         self.conv_group_scale = conv_group_scale
         self.conv_learning_rate = conv_learning_rate
         self.with_extra_blocks = with_extra_blocks
         self.extra_block_filters = extra_block_filters
+        self.prefix_name = weight_prefix_name

     def _conv_norm(self,
                    input,
@@ -151,35 +153,42 @@ class MobileNet(object):
        blocks = []
        # input 1/1
-        out = self._conv_norm(input, 3, int(32 * scale), 2, 1, name="conv1")
+        out = self._conv_norm(
+            input, 3, int(32 * scale), 2, 1, name=self.prefix_name + "conv1")
        # 1/2
        out = self.depthwise_separable(
-            out, 32, 64, 32, 1, scale, name="conv2_1")
+            out, 32, 64, 32, 1, scale, name=self.prefix_name + "conv2_1")
        out = self.depthwise_separable(
-            out, 64, 128, 64, 2, scale, name="conv2_2")
+            out, 64, 128, 64, 2, scale, name=self.prefix_name + "conv2_2")
        # 1/4
        out = self.depthwise_separable(
-            out, 128, 128, 128, 1, scale, name="conv3_1")
+            out, 128, 128, 128, 1, scale, name=self.prefix_name + "conv3_1")
        out = self.depthwise_separable(
-            out, 128, 256, 128, 2, scale, name="conv3_2")
+            out, 128, 256, 128, 2, scale, name=self.prefix_name + "conv3_2")
        # 1/8
        blocks.append(out)
        out = self.depthwise_separable(
-            out, 256, 256, 256, 1, scale, name="conv4_1")
+            out, 256, 256, 256, 1, scale, name=self.prefix_name + "conv4_1")
        out = self.depthwise_separable(
-            out, 256, 512, 256, 2, scale, name="conv4_2")
+            out, 256, 512, 256, 2, scale, name=self.prefix_name + "conv4_2")
        # 1/16
        blocks.append(out)
        for i in range(5):
            out = self.depthwise_separable(
-                out, 512, 512, 512, 1, scale, name="conv5_" + str(i + 1))
+                out,
+                512,
+                512,
+                512,
+                1,
+                scale,
+                name=self.prefix_name + "conv5_" + str(i + 1))
        module11 = out
        out = self.depthwise_separable(
-            out, 512, 1024, 512, 2, scale, name="conv5_6")
+            out, 512, 1024, 512, 2, scale, name=self.prefix_name + "conv5_6")
        # 1/32
        out = self.depthwise_separable(
-            out, 1024, 1024, 1024, 1, scale, name="conv6")
+            out, 1024, 1024, 1024, 1, scale, name=self.prefix_name + "conv6")
        module13 = out
        blocks.append(out)

        if not self.with_extra_blocks:
@@ -187,11 +196,15 @@ class MobileNet(object):
        num_filters = self.extra_block_filters
        module14 = self._extra_block(module13, num_filters[0][0],
-                                     num_filters[0][1], 1, 2, "conv7_1")
+                                     num_filters[0][1], 1, 2,
+                                     self.prefix_name + "conv7_1")
        module15 = self._extra_block(module14, num_filters[1][0],
-                                     num_filters[1][1], 1, 2, "conv7_2")
+                                     num_filters[1][1], 1, 2,
+                                     self.prefix_name + "conv7_2")
        module16 = self._extra_block(module15, num_filters[2][0],
-                                     num_filters[2][1], 1, 2, "conv7_3")
+                                     num_filters[2][1], 1, 2,
+                                     self.prefix_name + "conv7_3")
        module17 = self._extra_block(module16, num_filters[3][0],
-                                     num_filters[3][1], 1, 2, "conv7_4")
+                                     num_filters[3][1], 1, 2,
+                                     self.prefix_name + "conv7_4")
        return module11, module13, module14, module15, module16, module17
@@ -47,7 +47,7 @@ class ResNet(object):
         feature_maps (list): index of stages whose feature maps are returned
         dcn_v2_stages (list): index of stages who select deformable conv v2
     """
-    __shared__ = ['norm_type', 'freeze_norm']
+    __shared__ = ['norm_type', 'freeze_norm', 'weight_prefix_name']

     def __init__(self,
                  depth=50,
@@ -57,7 +57,8 @@ class ResNet(object):
                 norm_decay=0.,
                 variant='b',
                 feature_maps=[2, 3, 4, 5],
-                 dcn_v2_stages=[]):
+                 dcn_v2_stages=[],
+                 weight_prefix_name=''):
        super(ResNet, self).__init__()

        if isinstance(feature_maps, Integral):
@@ -89,6 +90,7 @@ class ResNet(object):
        self.stage_filters = [64, 128, 256, 512]
        self._c1_out_chan_num = 64
        self.na = NameAdapter(self)
+        self.prefix_name = weight_prefix_name

    def _conv_offset(self,
                     input,
@@ -121,6 +123,7 @@ class ResNet(object):
                   act=None,
                   name=None,
                   dcn_v2=False):
+        _name = self.prefix_name + name if self.prefix_name != '' else name
        if not dcn_v2:
            conv = fluid.layers.conv2d(
                input=input,
@@ -130,9 +133,9 @@ class ResNet(object):
                padding=(filter_size - 1) // 2,
                groups=groups,
                act=None,
-                param_attr=ParamAttr(name=name + "_weights"),
+                param_attr=ParamAttr(name=_name + "_weights"),
                bias_attr=False,
-                name=name + '.conv2d.output.1')
+                name=_name + '.conv2d.output.1')
        else:
            # select deformable conv"
            offset_mask = self._conv_offset(
@@ -141,7 +144,7 @@ class ResNet(object):
                stride=stride,
                padding=(filter_size - 1) // 2,
                act=None,
-                name=name + "_conv_offset")
+                name=_name + "_conv_offset")
            offset_channel = filter_size**2 * 2
            mask_channel = filter_size**2
            offset, mask = fluid.layers.split(
@@ -160,11 +163,12 @@ class ResNet(object):
                groups=groups,
                deformable_groups=1,
                im2col_step=1,
-                param_attr=ParamAttr(name=name + "_weights"),
+                param_attr=ParamAttr(name=_name + "_weights"),
                bias_attr=False,
-                name=name + ".conv2d.output.1")
+                name=_name + ".conv2d.output.1")

        bn_name = self.na.fix_conv_norm_name(name)
+        bn_name = self.prefix_name + bn_name if self.prefix_name != '' else bn_name

        norm_lr = 0. if self.freeze_norm else 1.
        norm_decay = self.norm_decay
@@ -420,7 +424,8 @@ class ResNetC5(ResNet):
                 freeze_norm=True,
                 norm_decay=0.,
                 variant='b',
-                 feature_maps=[5]):
+                 feature_maps=[5],
+                 weight_prefix_name=''):
        super(ResNetC5, self).__init__(depth, freeze_at, norm_type, freeze_norm,
                                       norm_decay, variant, feature_maps)
        self.severed_head = True
@@ -50,7 +50,8 @@ class ResNeXt(ResNet):
                 norm_decay=True,
                 variant='a',
                 feature_maps=[2, 3, 4, 5],
-                 dcn_v2_stages=[]):
+                 dcn_v2_stages=[],
+                 weight_prefix_name=''):
        assert depth in [50, 101, 152], "depth {} should be 50, 101 or 152"
        super(ResNeXt, self).__init__(depth, freeze_at, norm_type, freeze_norm,
                                      norm_decay, variant, feature_maps)
@@ -80,7 +81,8 @@ class ResNeXtC5(ResNeXt):
                 freeze_norm=True,
                 norm_decay=True,
                 variant='a',
-                 feature_maps=[5]):
+                 feature_maps=[5],
+                 weight_prefix_name=''):
        super(ResNeXtC5, self).__init__(depth, groups, group_width, freeze_at,
                                        norm_type, freeze_norm, norm_decay,
                                        variant, feature_maps)
...
@@ -56,7 +56,8 @@ class SENet(ResNeXt):
                 variant='d',
                 feature_maps=[2, 3, 4, 5],
                 dcn_v2_stages=[],
-                 std_senet=False):
+                 std_senet=False,
+                 weight_prefix_name=''):
        super(SENet, self).__init__(depth, groups, group_width, freeze_at,
                                    norm_type, freeze_norm, norm_decay, variant,
                                    feature_maps)
@@ -113,7 +114,8 @@ class SENetC5(SENet):
                 freeze_norm=True,
                 norm_decay=0.,
                 variant='d',
-                 feature_maps=[5]):
+                 feature_maps=[5],
+                 weight_prefix_name=''):
        super(SENetC5, self).__init__(depth, groups, group_width, freeze_at,
                                      norm_type, freeze_norm, norm_decay,
                                      variant, feature_maps)
...
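
One detail worth noting: ResNet and its subclasses do not concatenate the prefix unconditionally the way DarkNet and MobileNet do. _conv_norm computes _name = self.prefix_name + name if self.prefix_name != '' else name, and bn_name is guarded the same way. For a string name the guarded and unguarded forms are equivalent, since '' + name == name; they differ only when name is left at its None default and no prefix is set, where the guard passes None through unchanged instead of raising. A tiny illustrative check (not from the diff):

    # Equivalent for string names:
    prefix, name = '', 'conv1'
    assert (prefix + name) == (prefix + name if prefix != '' else name)

    # Different only when name is None (the _conv_norm default): the guarded
    # form falls back to None, while plain concatenation would raise TypeError.
    name = None
    _name = prefix + name if prefix != '' else name
    print(_name)  # None
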