Commit 09f433d1 authored by Eric Zhao

Updated squeezenet to satisfy the Python formatter

Parent 64ebfae1
import paddle.v2 as paddle

__all__ = ['squeezenet']

@@ -8,53 +7,63 @@ def fire_module(x, chs, squeeze=16, expand=64):
    squeezer = paddle.layer.img_conv(
        input=x,
        num_channels=chs,
        filter_size=(1, 1),
        num_filters=squeeze,
        stride=1,
        padding=(0, 0),
        act=paddle.activation.Relu(),
        bias_attr=False)
    uno_expander = paddle.layer.img_conv(
        input=squeezer,
        filter_size=(1, 1),
        num_filters=squeeze,
        stride=1,
        padding=(0, 0),
        act=paddle.activation.Relu(),
        bias_attr=False)
    tri_expander = paddle.layer.img_conv(
        input=squeezer,
        filter_size=(3, 3),
        num_filters=squeeze,
        stride=1,
        padding=(1, 1),
        act=paddle.activation.Relu(),
        bias_attr=False)
    return paddle.layer.concat(input=[uno_expander, tri_expander])
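
### Note: the `expand` argument is unused above -- in the reference SqueezeNet
### fire module the two expand branches would typically be built with
### num_filters=expand, so as written both expanders emit only `squeeze`
### feature maps.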

def squeezenet(x, class_dim, include_top=True):
    conv1 = paddle.layer.img_conv(
        input=x,
        num_channels=3,
        filter_size=(3, 3),
        num_filters=64,
        stride=(2, 2),
        padding=(0, 0),
        act=paddle.activation.Relu())
    pool1 = paddle.layer.img_pool(
        input=conv1, pool_size=3, stride=2, pool_type=paddle.pooling.Max())
    f1 = fire_module(pool1, 64, squeeze=16, expand=64)
    f2 = fire_module(f1, 32, squeeze=16, expand=64)
    pool2 = paddle.layer.img_pool(
        input=f2,
        num_channels=32,
        pool_size=3,
        stride=2,
        pool_type=paddle.pooling.Max())
    f3 = fire_module(pool2, 32, squeeze=32, expand=128)
    f4 = fire_module(f3, 64, squeeze=32, expand=128)
    pool3 = paddle.layer.img_pool(
        input=f4,
        num_channels=64,
        pool_size=3,
        stride=2,
        pool_type=paddle.pooling.Max())
    f5 = fire_module(pool3, 64, squeeze=48, expand=192)
    f6 = fire_module(f5, 96, squeeze=48, expand=192)
@@ -66,17 +75,21 @@ def squeezenet(x, class_dim, include_top=True):
        finalconv = paddle.layer.img_conv(
            input=drop,
            num_channels=128,
            filter_size=(1, 1),
            num_filters=class_dim,
            stride=1,
            padding=(0, 0),
            act=paddle.activation.Relu(),
            bias_attr=False)
        ### TODO: I'm trying to implement a global average pooling layer here.
        ### When I was using this layer, I manually set the pool_size to match the
        ### input dimensions. I saw that PaddleFluid has global pooling and wasn't
        ### sure what normal Paddle's equivalent is.
        gavg = paddle.layer.img_pool(
            input=finalconv,
            pool_size=8,
            stride=1,
            pool_type=paddle.pooling.Avg())
        out = paddle.layer.fc(input=finalconv,
                              size=class_dim,
                              act=paddle.activation.Softmax())
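        ### Note on the TODO above: paddle.v2's img_pool does not appear to
        ### expose a global-pooling switch, so hard-coding pool_size to the
        ### incoming feature-map size (8 here) is the usual workaround; the
        ### PaddleFluid equivalent the comment refers to would be something like
        ###     fluid.layers.pool2d(input=finalconv, pool_type='avg',
        ###                         global_pooling=True)
        ### which removes the dependency on a fixed input resolution. Also note
        ### that `out` is fed from finalconv rather than gavg, so the
        ### average-pooling result is currently unused.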
@@ -85,9 +98,13 @@ def squeezenet(x, class_dim, include_top=True):
        ### When I was using this layer, I manually set the pool_size to match the
        ### input dimensions. I saw that PaddleFluid has global pooling and wasn't
        ### sure what normal Paddle's equivalent is.
        gavg = paddle.layer.img_pool(
            input=f8,
            num_channels=128,
            pool_size=8,
            stride=1,
            pool_type=paddle.pooling.Avg())
        out = paddle.layer.fc(input=f8,
                              size=class_dim,
                              act=paddle.activation.Softmax())
    return out
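
For context, here is a minimal sketch of how the squeezenet function above would be wired into a paddle.v2 classification setup. The module name, input resolution, class count, and optimizer settings are illustrative assumptions rather than part of this commit; the input size just needs to leave an 8 x 8 feature map in front of the hard-coded average pooling.

import paddle.v2 as paddle

from squeezenet import squeezenet  # assumed module name for the file above

paddle.init(use_gpu=False, trainer_count=1)

CLASS_DIM = 102  # illustrative number of classes
IMG_SIZE = 143   # illustrative; a 143x143 input reaches an 8x8 map at pool3

# paddle.v2 feeds images as flattened dense vectors (channels * height * width).
image = paddle.layer.data(
    name="image", type=paddle.data_type.dense_vector(3 * IMG_SIZE * IMG_SIZE))
label = paddle.layer.data(
    name="label", type=paddle.data_type.integer_value(CLASS_DIM))

# Build the network and attach a classification cost for training.
out = squeezenet(image, CLASS_DIM, include_top=True)
cost = paddle.layer.classification_cost(input=out, label=label)

parameters = paddle.parameters.create(cost)
optimizer = paddle.optimizer.Momentum(momentum=0.9, learning_rate=1e-3)
trainer = paddle.trainer.SGD(
    cost=cost, parameters=parameters, update_equation=optimizer)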