Unverified commit e929d987, authored by yukavio, committed by GitHub

remove softmax from demo/models (#596)

* fix prune demo batchsize

* fix lr schedule in prune demo

* remove softmax from demo/models
Co-authored-by: wanghaoshuang <wanghaoshuang@baidu.com>
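All of the hunks below follow the same pattern: the final `fluid.layers.fc` in each demo model drops `act='softmax'`, so the networks now return raw logits, and the training script applies softmax inside the cross-entropy loss and averages it over the batch. A minimal, self-contained sketch of that pattern (assumed setup, not copied from the repo; the plain fc layer stands in for `model.net(...)`):

```python
# Minimal sketch of the pattern this commit adopts (assumed, not from the repo):
# the network emits raw logits and softmax is folded into the loss.
import paddle
import paddle.nn.functional as F

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()

with paddle.static.program_guard(main_prog, startup_prog):
    image = paddle.static.data(name='image', shape=[None, 784], dtype='float32')
    label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')

    # Stand-in for model.net(input=image, class_dim=...): no act='softmax',
    # so this returns raw logits.
    logits = paddle.static.nn.fc(x=image, size=10)

    # cross_entropy applies softmax internally, and the per-sample cost is
    # averaged over the batch, mirroring the compress() change at the end
    # of this diff.
    cost = F.cross_entropy(input=logits, label=label)
    avg_cost = paddle.mean(cost)
    acc_top1 = paddle.metric.accuracy(input=logits, label=label, k=1)
```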
@@ -130,7 +130,6 @@ class MobileNet():
         with fluid.name_scope('last_fc'):
             output = fluid.layers.fc(input=input,
                                      size=class_dim,
-                                     act='softmax',
                                      param_attr=ParamAttr(
                                          initializer=MSRA(),
                                          name="fc7_weights"),
...
@@ -110,7 +110,6 @@ class MobileNetV2():
         output = fluid.layers.fc(input=input,
                                  size=class_dim,
-                                 act='softmax',
                                  param_attr=ParamAttr(name='fc10_weights'),
                                  bias_attr=ParamAttr(name='fc10_offset'))
         return output
...
@@ -119,7 +119,6 @@ class MobileNetV3():
         conv = self.hard_swish(conv)
         out = fluid.layers.fc(input=conv,
                               size=class_dim,
-                              act='softmax',
                               param_attr=ParamAttr(name='fc_weights'),
                               bias_attr=ParamAttr(name='fc_offset'))
         return out
@@ -244,8 +243,7 @@ class MobileNetV3():
         if num_in_filter != num_out_filter or stride != 1:
             return conv2
         else:
-            return fluid.layers.elementwise_add(
-                x=input_data, y=conv2, act=None)
+            return fluid.layers.elementwise_add(x=input_data, y=conv2, act=None)

 def MobileNetV3_small_x0_25():
...
@@ -59,10 +59,8 @@ class PVANet():
             block_configs=[
                 BlockConfig(2, '64 48-96 24-48-48 96 128', True,
                             BLOCK_TYPE_INCEP),
-                BlockConfig(1, '64 64-96 24-48-48 128', True,
-                            BLOCK_TYPE_INCEP),
-                BlockConfig(1, '64 64-96 24-48-48 128', True,
-                            BLOCK_TYPE_INCEP),
+                BlockConfig(1, '64 64-96 24-48-48 128', True, BLOCK_TYPE_INCEP),
+                BlockConfig(1, '64 64-96 24-48-48 128', True, BLOCK_TYPE_INCEP),
                 BlockConfig(1, '64 64-96 24-48-48 128', True, BLOCK_TYPE_INCEP)
             ],
             name='conv4',
@@ -76,9 +74,8 @@ class PVANet():
                 BlockConfig(1, '64 96-128 32-64-64 196', True,
                             BLOCK_TYPE_INCEP),
                 BlockConfig(1, '64 96-128 32-64-64 196', True,
-                            BLOCK_TYPE_INCEP), BlockConfig(
-                                1, '64 96-128 32-64-64 196', True,
-                                BLOCK_TYPE_INCEP)
+                            BLOCK_TYPE_INCEP),
+                BlockConfig(1, '64 96-128 32-64-64 196', True, BLOCK_TYPE_INCEP)
             ],
             name='conv5',
             end_points=end_points)
@@ -89,7 +86,6 @@ class PVANet():
         output = fluid.layers.fc(input=input,
                                  size=class_dim,
-                                 act='softmax',
                                  param_attr=ParamAttr(
                                      initializer=MSRA(), name="fc_weights"),
                                  bias_attr=ParamAttr(name="fc_offset"))
@@ -182,9 +178,8 @@ class PVANet():
                 conv_stride = stride
             else:
                 conv_stride = 1
-            path_net = self._conv_bn_relu(path_net, num_output,
-                                          kernel_size, name + scope,
-                                          conv_stride)
+            path_net = self._conv_bn_relu(path_net, num_output, kernel_size,
+                                          name + scope, conv_stride)
             paths.append(path_net)

         if stride > 1:
@@ -359,8 +354,8 @@ class PVANet():
                            name,
                            stride=1,
                            groups=1):
-        return self._conv_bn_relu(input, num_filters, filter_size, name,
-                                  stride, groups)
+        return self._conv_bn_relu(input, num_filters, filter_size, name, stride,
+                                  groups)

 def Fpn_Fusion(blocks, net):
@@ -433,8 +428,7 @@ def east(input, class_num=31):
                 out[i], k, 1, name='fusion_' + str(len(blocks)))
         elif j <= 4:
             conv = net.deconv_bn_layer(
-                out[i], k, 2 * j, j, j // 2,
-                name='fusion_' + str(len(blocks)))
+                out[i], k, 2 * j, j, j // 2, name='fusion_' + str(len(blocks)))
         else:
             conv = net.deconv_bn_layer(
                 out[i], 32, 8, 4, 2, name='fusion_' + str(len(blocks)) + '_1')
...
@@ -105,7 +105,6 @@ class ResNet():
             out = fluid.layers.fc(
                 input=pool,
                 size=class_dim,
-                act='softmax',
                 name=fc_name,
                 param_attr=fluid.param_attr.ParamAttr(
                     initializer=fluid.initializer.Uniform(-stdv, stdv)))
@@ -138,8 +137,7 @@ class ResNet():
             bn_name = "bn" + name[3:]
         else:
             if name.split("_")[1] == "conv1":
-                bn_name = name.split("_", 1)[0] + "_bn_" + name.split("_",
-                                                                      1)[1]
+                bn_name = name.split("_", 1)[0] + "_bn_" + name.split("_", 1)[1]
             else:
                 bn_name = name.split("_", 1)[0] + "_bn" + name.split("_",
                                                                      1)[1][3:]
...
@@ -147,7 +147,8 @@ def compress(args):
     # model definition
     model = models.__dict__[args.model]()
     out = model.net(input=image, class_dim=class_dim)
-    avg_cost = paddle.nn.functional.loss.cross_entropy(input=out, label=label)
+    cost = paddle.nn.functional.loss.cross_entropy(input=out, label=label)
+    avg_cost = paddle.mean(x=cost)
     acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
     acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
     val_program = paddle.static.default_main_program().clone(for_test=True)
...
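One side effect worth noting (an inference-side assumption, not something shown in this diff): callers that previously treated the demo models' output as probabilities now receive logits, so probabilities have to be recovered explicitly. A small hypothetical sketch:

```python
# Hypothetical post-processing: recover class probabilities from the raw
# logits the demo models now return. softmax is monotonic, so top-k metrics
# such as paddle.metric.accuracy give the same result either way.
import paddle
import paddle.nn.functional as F

logits = paddle.to_tensor([[2.0, 0.5, -1.0]])  # stand-in for a model output
probs = F.softmax(logits, axis=-1)             # normalized probabilities
pred = paddle.argmax(probs, axis=-1)           # predicted class index
print(probs.numpy(), pred.numpy())
```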