Unverified · Commit f629ce07 authored by Q qingqing01, committed by GitHub

Change the transposed conv2d initializer. (#984)

* Change the transposed conv2d initializer.

* small fix.
Parent 843a00f7
@@ -4,6 +4,7 @@ import paddle.fluid as fluid
 from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.initializer import Xavier
 from paddle.fluid.initializer import Constant
+from paddle.fluid.initializer import Bilinear
 from paddle.fluid.regularizer import L2Decay
@@ -48,14 +49,19 @@ class PyramidBox(object):
     def __init__(self,
                  data_shape,
                  num_classes,
+                 use_transposed_conv2d=True,
                  is_infer=False,
                  sub_network=False):
+        """
+        TODO(qingqing): add comments.
+        """
         self.data_shape = data_shape
         self.min_sizes = [16., 32., 64., 128., 256., 512.]
         self.steps = [4., 8., 16., 32., 64., 128.]
+        self.num_classes = num_classes
+        self.use_transposed_conv2d = use_transposed_conv2d
         self.is_infer = is_infer
         self.sub_network = sub_network
-        self.num_classes = num_classes
         # the base network is VGG with atrous layers
         self._input()
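The new use_transposed_conv2d argument lets callers pick between the bilinear-initialized transposed convolution and a plain bilinear resize when the network is built. A minimal usage sketch (the module path, input shape, and class count are assumptions for illustration, not taken from this diff):

# Hypothetical usage of the new constructor flag.
from pyramidbox import PyramidBox  # module name assumed

net = PyramidBox(
    data_shape=[3, 640, 640],      # CHW shape; a 640x640 input is an assumption
    num_classes=2,                 # face vs. background, assumed
    use_transposed_conv2d=False,   # use fluid.layers.resize_bilinear instead
    is_infer=False,
    sub_network=True)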
@@ -120,20 +126,30 @@ class PyramidBox(object):
             b_attr = ParamAttr(learning_rate=2., regularizer=L2Decay(0.))
             conv1 = fluid.layers.conv2d(
                 up_from, ch, 1, act='relu', bias_attr=b_attr)
-            conv_trans = fluid.layers.conv2d_transpose(
-                conv1,
-                ch,
-                output_size=None,
-                filter_size=4,
-                padding=1,
-                stride=2,
-                groups=ch,
-                bias_attr=False)
+            if self.use_transposed_conv2d:
+                w_attr = ParamAttr(
+                    learning_rate=0.,
+                    regularizer=L2Decay(0.),
+                    initializer=Bilinear())
+                upsampling = fluid.layers.conv2d_transpose(
+                    conv1,
+                    ch,
+                    output_size=None,
+                    filter_size=4,
+                    padding=1,
+                    stride=2,
+                    groups=ch,
+                    param_attr=w_attr,
+                    bias_attr=False)
+            else:
+                upsampling = fluid.layers.resize_bilinear(
+                    conv1, out_shape=up_to.shape[2:])
             b_attr = ParamAttr(learning_rate=2., regularizer=L2Decay(0.))
             conv2 = fluid.layers.conv2d(
                 up_to, ch, 1, act='relu', bias_attr=b_attr)
             # eltwise mul
-            conv_fuse = conv_trans * conv2
+            conv_fuse = upsampling * conv2
             return conv_fuse

         self.lfpn2_on_conv5 = fpn(self.conv6, self.conv5)
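For context on the new w_attr: setting learning_rate=0. together with initializer=Bilinear() freezes the grouped conv2d_transpose as a fixed 2x bilinear upsampler rather than a learned one, which mirrors the resize_bilinear branch. A short sketch of the standard bilinear deconvolution kernel such an initializer follows (an illustration of the formula, not the initializer's own code):

import numpy as np

def bilinear_upsample_kernel(filter_size=4):
    # Standard FCN-style bilinear kernel for a deconvolution filter.
    factor = (filter_size + 1) // 2
    center = factor - 1 if filter_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:filter_size, :filter_size]
    return ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))

# With filter_size=4, stride=2, padding=1 and groups=ch (one such kernel per
# channel), the transposed convolution doubles the spatial size by bilinear
# interpolation.
print(bilinear_upsample_kernel(4))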
@@ -245,6 +261,8 @@ class PyramidBox(object):
                 min_sizes=[self.min_sizes[i]],
                 steps=[self.steps[i]] * 2,
                 aspect_ratios=[1.],
+                clip=False,
+                flip=True,
                 offset=0.5)
             box = fluid.layers.reshape(box, shape=[-1, 4])
             var = fluid.layers.reshape(var, shape=[-1, 4])
@@ -322,6 +340,8 @@ class PyramidBox(object):
                 min_sizes=[min_sizes[i]],
                 steps=[steps[i]] * 2,
                 aspect_ratios=[1.],
+                clip=False,
+                flip=True,
                 offset=0.5)
             box = fluid.layers.reshape(box, shape=[-1, 4])
             var = fluid.layers.reshape(var, shape=[-1, 4])
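Both prior_box call sites now pass clip=False and flip=True explicitly. With a single aspect ratio of 1., flipping adds no extra ratios, and clip=False leaves border priors unclipped rather than forcing them into the [0, 1] range. A small re-derivation of the square priors these arguments imply, assuming a 640x640 input (an illustration only, not the fluid op itself):

import numpy as np

def square_priors(feat_size, min_size, step, offset=0.5, clip=False, img_size=640.):
    # One square box per feature-map cell, centered at (index + offset) * step,
    # with side length min_size, normalized by the assumed image size.
    boxes = []
    for i in range(feat_size):
        for j in range(feat_size):
            cx, cy = (j + offset) * step, (i + offset) * step
            box = np.array([cx - min_size / 2., cy - min_size / 2.,
                            cx + min_size / 2., cy + min_size / 2.]) / img_size
            boxes.append(np.clip(box, 0., 1.) if clip else box)
    return np.stack(boxes)

# e.g. the finest level in this model: a 160x160 map with min_size=16. and step=4.
print(square_priors(160, 16., 4.).shape)  # (25600, 4)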