From 0a2e0a6d2f5e44fc1387238fe26bd3096c0b351c Mon Sep 17 00:00:00 2001
From: Bai Yifan
Date: Mon, 14 Oct 2019 20:11:50 +0800
Subject: [PATCH] Add prefix_name in PaddleDetection (#3556)

---
 ppdet/modeling/anchor_heads/yolo_head.py | 21 ++++++-----
 ppdet/modeling/backbones/darknet.py      | 19 ++++++----
 ppdet/modeling/backbones/mobilenet.py    | 45 +++++++++++++++---------
 ppdet/modeling/backbones/resnet.py       | 21 ++++++-----
 ppdet/modeling/backbones/resnext.py      |  6 ++--
 ppdet/modeling/backbones/senet.py        |  6 ++--
 6 files changed, 75 insertions(+), 43 deletions(-)

diff --git a/ppdet/modeling/anchor_heads/yolo_head.py b/ppdet/modeling/anchor_heads/yolo_head.py
index 77dce6549..19829233f 100644
--- a/ppdet/modeling/anchor_heads/yolo_head.py
+++ b/ppdet/modeling/anchor_heads/yolo_head.py
@@ -41,7 +41,7 @@ class YOLOv3Head(object):
         nms (object): an instance of `MultiClassNMS`
     """
     __inject__ = ['nms']
-    __shared__ = ['num_classes']
+    __shared__ = ['num_classes', 'weight_prefix_name']

     def __init__(self,
                  norm_decay=0.,
@@ -56,7 +56,8 @@ class YOLOv3Head(object):
                      nms_top_k=1000,
                      keep_top_k=100,
                      nms_threshold=0.45,
-                     background_label=-1).__dict__):
+                     background_label=-1).__dict__,
+                 weight_prefix_name=''):
         self.norm_decay = norm_decay
         self.num_classes = num_classes
         self.ignore_thresh = ignore_thresh
@@ -64,6 +65,7 @@ class YOLOv3Head(object):
         self.anchor_masks = anchor_masks
         self._parse_anchors(anchors)
         self.nms = nms
+        self.prefix_name = weight_prefix_name
         if isinstance(nms, dict):
             self.nms = MultiClassNMS(**nms)

@@ -208,7 +210,7 @@ class YOLOv3Head(object):
                 block,
                 channel=512 // (2**i),
                 is_test=(not is_train),
-                name="yolo_block.{}".format(i))
+                name=self.prefix_name + "yolo_block.{}".format(i))

             # out channel number = mask_num * (5 + class_num)
             num_filters = len(self.anchor_masks[i]) * (self.num_classes + 5)
@@ -219,11 +221,12 @@ class YOLOv3Head(object):
                 stride=1,
                 padding=0,
                 act=None,
-                param_attr=ParamAttr(
-                    name="yolo_output.{}.conv.weights".format(i)),
+                param_attr=ParamAttr(name=self.prefix_name +
+                                     "yolo_output.{}.conv.weights".format(i)),
                 bias_attr=ParamAttr(
                     regularizer=L2Decay(0.),
-                    name="yolo_output.{}.conv.bias".format(i)))
+                    name=self.prefix_name +
+                    "yolo_output.{}.conv.bias".format(i)))
             outputs.append(block_out)

             if i < len(blocks) - 1:
@@ -235,7 +238,7 @@ class YOLOv3Head(object):
                     stride=1,
                     padding=0,
                     is_test=(not is_train),
-                    name="yolo_transition.{}".format(i))
+                    name=self.prefix_name + "yolo_transition.{}".format(i))
                 # upsample
                 route = self._upsample(route)

@@ -272,7 +275,7 @@ class YOLOv3Head(object):
                 ignore_thresh=self.ignore_thresh,
                 downsample_ratio=downsample,
                 use_label_smooth=self.label_smooth,
-                name="yolo_loss" + str(i))
+                name=self.prefix_name + "yolo_loss" + str(i))
             losses.append(fluid.layers.reduce_mean(loss))
             downsample //= 2

@@ -304,7 +307,7 @@ class YOLOv3Head(object):
                 class_num=self.num_classes,
                 conf_thresh=self.nms.score_threshold,
                 downsample_ratio=downsample,
-                name="yolo_box" + str(i))
+                name=self.prefix_name + "yolo_box" + str(i))
             boxes.append(box)
             scores.append(fluid.layers.transpose(score, perm=[0, 2, 1]))

diff --git a/ppdet/modeling/backbones/darknet.py b/ppdet/modeling/backbones/darknet.py
index 8a391b046..37583ab29 100644
--- a/ppdet/modeling/backbones/darknet.py
+++ b/ppdet/modeling/backbones/darknet.py
@@ -36,14 +36,19 @@ class DarkNet(object):
         norm_type (str): normalization type, 'bn' and 'sync_bn' are supported
         norm_decay (float): weight decay for normalization layer weights
     """
-    __shared__ = ['norm_type']
+    __shared__ = ['norm_type', 'weight_prefix_name']

-    def __init__(self, depth=53, norm_type='bn', norm_decay=0.):
+    def __init__(self,
+                 depth=53,
+                 norm_type='bn',
+                 norm_decay=0.,
+                 weight_prefix_name=''):
         assert depth in [53], "unsupported depth value"
         self.depth = depth
         self.norm_type = norm_type
         self.norm_decay = norm_decay
         self.depth_cfg = {53: ([1, 2, 8, 8, 4], self.basicblock)}
+        self.prefix_name = weight_prefix_name

     def _conv_norm(self,
                    input,
@@ -143,9 +148,11 @@ class DarkNet(object):
             filter_size=3,
             stride=1,
             padding=1,
-            name="yolo_input")
+            name=self.prefix_name + "yolo_input")
         downsample_ = self._downsample(
-            input=conv, ch_out=conv.shape[1] * 2, name="yolo_input.downsample")
+            input=conv,
+            ch_out=conv.shape[1] * 2,
+            name=self.prefix_name + "yolo_input.downsample")
         blocks = []
         for i, stage in enumerate(stages):
             block = self.layer_warp(
@@ -153,11 +160,11 @@ class DarkNet(object):
                 input=downsample_,
                 ch_out=32 * 2**i,
                 count=stage,
-                name="stage.{}".format(i))
+                name=self.prefix_name + "stage.{}".format(i))
             blocks.append(block)
             if i < len(stages) - 1:  # do not downsaple in the last stage
                 downsample_ = self._downsample(
                     input=block,
                     ch_out=block.shape[1] * 2,
-                    name="stage.{}.downsample".format(i))
+                    name=self.prefix_name + "stage.{}.downsample".format(i))
         return blocks
diff --git a/ppdet/modeling/backbones/mobilenet.py b/ppdet/modeling/backbones/mobilenet.py
index 7e34f4bc7..56afdf964 100644
--- a/ppdet/modeling/backbones/mobilenet.py
+++ b/ppdet/modeling/backbones/mobilenet.py
@@ -37,7 +37,7 @@ class MobileNet(object):
         with_extra_blocks (bool): if extra blocks should be added
         extra_block_filters (list): number of filter for each extra block
     """
-    __shared__ = ['norm_type']
+    __shared__ = ['norm_type', 'weight_prefix_name']

     def __init__(self,
                  norm_type='bn',
@@ -46,13 +46,15 @@ class MobileNet(object):
                  conv_learning_rate=1.0,
                  with_extra_blocks=False,
                  extra_block_filters=[[256, 512], [128, 256], [128, 256],
-                                      [64, 128]]):
+                                      [64, 128]],
+                 weight_prefix_name=''):
         self.norm_type = norm_type
         self.norm_decay = norm_decay
         self.conv_group_scale = conv_group_scale
         self.conv_learning_rate = conv_learning_rate
         self.with_extra_blocks = with_extra_blocks
         self.extra_block_filters = extra_block_filters
+        self.prefix_name = weight_prefix_name

     def _conv_norm(self,
                    input,
@@ -151,35 +153,42 @@ class MobileNet(object):
         blocks = []
         # input 1/1
-        out = self._conv_norm(input, 3, int(32 * scale), 2, 1, name="conv1")
+        out = self._conv_norm(
+            input, 3, int(32 * scale), 2, 1, name=self.prefix_name + "conv1")
         # 1/2
         out = self.depthwise_separable(
-            out, 32, 64, 32, 1, scale, name="conv2_1")
+            out, 32, 64, 32, 1, scale, name=self.prefix_name + "conv2_1")
         out = self.depthwise_separable(
-            out, 64, 128, 64, 2, scale, name="conv2_2")
+            out, 64, 128, 64, 2, scale, name=self.prefix_name + "conv2_2")
         # 1/4
         out = self.depthwise_separable(
-            out, 128, 128, 128, 1, scale, name="conv3_1")
+            out, 128, 128, 128, 1, scale, name=self.prefix_name + "conv3_1")
         out = self.depthwise_separable(
-            out, 128, 256, 128, 2, scale, name="conv3_2")
+            out, 128, 256, 128, 2, scale, name=self.prefix_name + "conv3_2")
         # 1/8
         blocks.append(out)
         out = self.depthwise_separable(
-            out, 256, 256, 256, 1, scale, name="conv4_1")
+            out, 256, 256, 256, 1, scale, name=self.prefix_name + "conv4_1")
         out = self.depthwise_separable(
-            out, 256, 512, 256, 2, scale, name="conv4_2")
+            out, 256, 512, 256, 2, scale, name=self.prefix_name + "conv4_2")
         # 1/16
         blocks.append(out)
         for i in range(5):
             out = self.depthwise_separable(
-                out, 512, 512, 512, 1, scale, name="conv5_" + str(i + 1))
+                out,
+                512,
+                512,
+                512,
+                1,
+                scale,
+                name=self.prefix_name + "conv5_" + str(i + 1))
         module11 = out
         out = self.depthwise_separable(
-            out, 512, 1024, 512, 2, scale, name="conv5_6")
+            out, 512, 1024, 512, 2, scale, name=self.prefix_name + "conv5_6")
         # 1/32
         out = self.depthwise_separable(
-            out, 1024, 1024, 1024, 1, scale, name="conv6")
+            out, 1024, 1024, 1024, 1, scale, name=self.prefix_name + "conv6")
         module13 = out
         blocks.append(out)
         if not self.with_extra_blocks:
@@ -187,11 +196,15 @@ class MobileNet(object):

         num_filters = self.extra_block_filters
         module14 = self._extra_block(module13, num_filters[0][0],
-                                     num_filters[0][1], 1, 2, "conv7_1")
+                                     num_filters[0][1], 1, 2,
+                                     self.prefix_name + "conv7_1")
         module15 = self._extra_block(module14, num_filters[1][0],
-                                     num_filters[1][1], 1, 2, "conv7_2")
+                                     num_filters[1][1], 1, 2,
+                                     self.prefix_name + "conv7_2")
         module16 = self._extra_block(module15, num_filters[2][0],
-                                     num_filters[2][1], 1, 2, "conv7_3")
+                                     num_filters[2][1], 1, 2,
+                                     self.prefix_name + "conv7_3")
         module17 = self._extra_block(module16, num_filters[3][0],
-                                     num_filters[3][1], 1, 2, "conv7_4")
+                                     num_filters[3][1], 1, 2,
+                                     self.prefix_name + "conv7_4")
         return module11, module13, module14, module15, module16, module17
diff --git a/ppdet/modeling/backbones/resnet.py b/ppdet/modeling/backbones/resnet.py
index 81fb25e08..496f5e76c 100644
--- a/ppdet/modeling/backbones/resnet.py
+++ b/ppdet/modeling/backbones/resnet.py
@@ -47,7 +47,7 @@ class ResNet(object):
         feature_maps (list): index of stages whose feature maps are returned
         dcn_v2_stages (list): index of stages who select deformable conv v2
     """
-    __shared__ = ['norm_type', 'freeze_norm']
+    __shared__ = ['norm_type', 'freeze_norm', 'weight_prefix_name']

     def __init__(self,
                  depth=50,
@@ -57,7 +57,8 @@ class ResNet(object):
                  norm_decay=0.,
                  variant='b',
                  feature_maps=[2, 3, 4, 5],
-                 dcn_v2_stages=[]):
+                 dcn_v2_stages=[],
+                 weight_prefix_name=''):
         super(ResNet, self).__init__()

         if isinstance(feature_maps, Integral):
@@ -89,6 +90,7 @@ class ResNet(object):
         self.stage_filters = [64, 128, 256, 512]
         self._c1_out_chan_num = 64
         self.na = NameAdapter(self)
+        self.prefix_name = weight_prefix_name

     def _conv_offset(self,
                      input,
@@ -121,6 +123,7 @@ class ResNet(object):
                    act=None,
                    name=None,
                    dcn_v2=False):
+        _name = self.prefix_name + name if self.prefix_name != '' else name
         if not dcn_v2:
             conv = fluid.layers.conv2d(
                 input=input,
@@ -130,9 +133,9 @@ class ResNet(object):
                 padding=(filter_size - 1) // 2,
                 groups=groups,
                 act=None,
-                param_attr=ParamAttr(name=name + "_weights"),
+                param_attr=ParamAttr(name=_name + "_weights"),
                 bias_attr=False,
-                name=name + '.conv2d.output.1')
+                name=_name + '.conv2d.output.1')
         else:  # select deformable conv"
             offset_mask = self._conv_offset(
                 input=conv,
@@ -141,7 +144,7 @@ class ResNet(object):
                 stride=stride,
                 padding=(filter_size - 1) // 2,
                 act=None,
-                name=name + "_conv_offset")
+                name=_name + "_conv_offset")
             offset_channel = filter_size**2 * 2
             mask_channel = filter_size**2
             offset, mask = fluid.layers.split(
@@ -160,11 +163,12 @@ class ResNet(object):
                 groups=groups,
                 deformable_groups=1,
                 im2col_step=1,
-                param_attr=ParamAttr(name=name + "_weights"),
+                param_attr=ParamAttr(name=_name + "_weights"),
                 bias_attr=False,
-                name=name + ".conv2d.output.1")
+                name=_name + ".conv2d.output.1")

         bn_name = self.na.fix_conv_norm_name(name)
+        bn_name = self.prefix_name + bn_name if self.prefix_name != '' else bn_name

         norm_lr = 0. if self.freeze_norm else 1.
         norm_decay = self.norm_decay
@@ -420,7 +424,8 @@ class ResNetC5(ResNet):
                  freeze_norm=True,
                  norm_decay=0.,
                  variant='b',
-                 feature_maps=[5]):
+                 feature_maps=[5],
+                 weight_prefix_name=''):
         super(ResNetC5, self).__init__(depth, freeze_at, norm_type, freeze_norm,
                                        norm_decay, variant, feature_maps)
         self.severed_head = True
diff --git a/ppdet/modeling/backbones/resnext.py b/ppdet/modeling/backbones/resnext.py
index d9d49e761..545251137 100644
--- a/ppdet/modeling/backbones/resnext.py
+++ b/ppdet/modeling/backbones/resnext.py
@@ -50,7 +50,8 @@ class ResNeXt(ResNet):
                  norm_decay=True,
                  variant='a',
                  feature_maps=[2, 3, 4, 5],
-                 dcn_v2_stages=[]):
+                 dcn_v2_stages=[],
+                 weight_prefix_name=''):
         assert depth in [50, 101, 152], "depth {} should be 50, 101 or 152"
         super(ResNeXt, self).__init__(depth, freeze_at, norm_type, freeze_norm,
                                       norm_decay, variant, feature_maps)
@@ -80,7 +81,8 @@ class ResNeXtC5(ResNeXt):
                  freeze_norm=True,
                  norm_decay=True,
                  variant='a',
-                 feature_maps=[5]):
+                 feature_maps=[5],
+                 weight_prefix_name=''):
         super(ResNeXtC5, self).__init__(depth, groups, group_width, freeze_at,
                                         norm_type, freeze_norm, norm_decay,
                                         variant, feature_maps)
diff --git a/ppdet/modeling/backbones/senet.py b/ppdet/modeling/backbones/senet.py
index f92d5eaef..09c69ff19 100644
--- a/ppdet/modeling/backbones/senet.py
+++ b/ppdet/modeling/backbones/senet.py
@@ -56,7 +56,8 @@ class SENet(ResNeXt):
                  variant='d',
                  feature_maps=[2, 3, 4, 5],
                  dcn_v2_stages=[],
-                 std_senet=False):
+                 std_senet=False,
+                 weight_prefix_name=''):
         super(SENet, self).__init__(depth, groups, group_width, freeze_at,
                                     norm_type, freeze_norm, norm_decay, variant,
                                     feature_maps)
@@ -113,7 +114,8 @@ class SENetC5(SENet):
                  freeze_norm=True,
                  norm_decay=0.,
                  variant='d',
-                 feature_maps=[5]):
+                 feature_maps=[5],
+                 weight_prefix_name=''):
         super(SENetC5, self).__init__(depth, groups, group_width, freeze_at,
                                       norm_type, freeze_norm, norm_decay,
                                       variant, feature_maps)
--
GitLab
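
Usage sketch (not part of the patch above): the patch threads a single `weight_prefix_name` argument through the YOLOv3 head and the backbones and prepends it to every layer and parameter name they create, so two copies of the same network can be built in one fluid program without parameter-name collisions. The snippet below shows how the new argument could be used, assuming the DarkNet backbone touched above and PaddlePaddle's static-graph fluid API; the 'teacher_' prefix and the input shape are illustrative only.

    import paddle.fluid as fluid
    from ppdet.modeling.backbones.darknet import DarkNet

    image = fluid.layers.data(
        name='image', shape=[3, 608, 608], dtype='float32')

    # Default: behaviour is unchanged, parameter names carry no prefix.
    student = DarkNet(depth=53, norm_type='bn')
    student_feats = student(image)

    # With a prefix, every parameter created by this copy of the backbone is
    # named "teacher_...", so it can coexist with the unprefixed copy above
    # (e.g. a teacher network built next to a student network).
    teacher = DarkNet(depth=53, norm_type='bn', weight_prefix_name='teacher_')
    teacher_feats = teacher(image)

Because `weight_prefix_name` is also listed in `__shared__`, a single value set in a ppdet YAML config can be propagated to the head and the backbone together, keeping the prefix consistent across the whole model.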