diff --git a/configs/ttfnet/_base_/pafnet_lite_reader.yml b/configs/ttfnet/_base_/pafnet_lite_reader.yml
index 6afbd0b09cbc9fc454584f8dfdf0aadfff24f6e7..f88e8bfffc886d72c1d721cb6631c59e0712aaee 100644
--- a/configs/ttfnet/_base_/pafnet_lite_reader.yml
+++ b/configs/ttfnet/_base_/pafnet_lite_reader.yml
@@ -18,6 +18,7 @@ TrainReader:
   shuffle: true
   drop_last: true
   use_shared_memory: true
+  cutmix_epoch: 200
 
 EvalReader:
   sample_transforms:
diff --git a/ppdet/modeling/heads/ttf_head.py b/ppdet/modeling/heads/ttf_head.py
index 823b92c0b0355c246a8a21e70f1db1bcd01ee3e1..02bb6d612aef05d148bafcc05afa807526c14244 100644
--- a/ppdet/modeling/heads/ttf_head.py
+++ b/ppdet/modeling/heads/ttf_head.py
@@ -86,11 +86,13 @@ class HMHead(nn.Layer):
             head_conv.add_sublayer(name + '.act', nn.ReLU())
         self.feat = head_conv
         bias_init = float(-np.log((1 - 0.01) / 0.01))
+        weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
+                                                                          0.01))
         self.head = nn.Conv2D(
             in_channels=ch_out,
             out_channels=num_classes,
             kernel_size=1,
-            weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
+            weight_attr=weight_attr,
             bias_attr=ParamAttr(
                 learning_rate=2.,
                 regularizer=L2Decay(0.),
@@ -160,12 +162,14 @@ class WHHead(nn.Layer):
                         learning_rate=2.,
                         regularizer=L2Decay(0.))))
             head_conv.add_sublayer(name + '.act', nn.ReLU())
+        weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
+                                                                          0.001))
         self.feat = head_conv
         self.head = nn.Conv2D(
             in_channels=ch_out,
             out_channels=4,
             kernel_size=1,
-            weight_attr=ParamAttr(initializer=Normal(0, 0.001)),
+            weight_attr=weight_attr,
             bias_attr=ParamAttr(
                 learning_rate=2.,
                 regularizer=L2Decay(0.)))