未验证 提交 b6659ca8 编写于 作者: Y yukavio 提交者: GitHub

complete prune tests (#425)

* fix pruned model save and load, fix bugs of pruning depthwise conv

* add unit test of pruned model save and load

* temp delete

* add unit test

* add some unit tests

* complete autoprune unit test

* add sensitive unit tests

* fix some unit test

* fix auto_pruner.py
上级 8ba34d78
......@@ -113,9 +113,13 @@ class AutoPruner(object):
self._pruned_latency)
init_tokens = self._ratios2tokens(self._init_ratios)
_logger.info("range table: {}".format(self._range_table))
controller = SAController(self._range_table, self._reduce_rate,
self._init_temperature, self._max_try_times,
init_tokens, self._constrain_func)
controller = SAController(
self._range_table,
self._reduce_rate,
self._init_temperature,
self._max_try_times,
init_tokens,
constrain_func=self._constrain_func)
server_ip, server_port = server_addr
if server_ip == None or server_ip == "":
......
......@@ -15,6 +15,7 @@
# limitations under the License.
import logging
import numpy as np
from ..core import GraphWrapper
from ..common import get_logger
from ..core import Registry
......@@ -94,7 +95,7 @@ def optimal_threshold(group, ratio):
list: pruned indexes
"""
name, axis, score = group[
name, axis, score, _ = group[
0] # sort channels by the first convolution's score
score[score < 1e-18] = 1e-18
......@@ -111,6 +112,6 @@ def optimal_threshold(group, ratio):
pruned_idx = np.squeeze(np.argwhere(score < th))
idxs = []
for name, axis, score in group:
for name, axis, score, _ in group:
idxs.append((name, axis, pruned_idx))
return idxs
......@@ -21,7 +21,8 @@ def conv_bn_layer(input,
name,
stride=1,
groups=1,
act=None):
act=None,
bias=False):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
......@@ -31,7 +32,7 @@ def conv_bn_layer(input,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False,
bias_attr=bias,
name=name + "_out")
bn_name = name + "_bn"
return fluid.layers.batch_norm(
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import unittest
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from paddleslim.prune import AutoPruner
from layers import conv_bn_layer
class TestPrune(unittest.TestCase):
    """End-to-end check of AutoPruner's simulated-annealing search.

    Builds a small conv network with two element-wise shortcuts, runs the
    auto pruner for ten search steps with a constant reward, and asserts
    that the searched pruning ratios drift away from the initial ones.
    """

    def test_prune(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        #   X       X              O       X              O
        # conv1-->conv2-->sum1-->conv3-->conv4-->sum2-->conv5-->conv6
        #     |            ^ |                    ^
        #     |____________| |____________________|
        #
        # X: prune output channels
        # O: prune input channels
        with fluid.program_guard(main_program, startup_program):
            input = fluid.data(name="image", shape=[None, 3, 16, 16])
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            conv6 = conv_bn_layer(conv5, 8, 3, "conv6")

        # Only the convolution weights take part in channel pruning.
        # (The original also collected a `shapes` dict here that was never
        # read; it has been dropped.)
        params = [
            param.name
            for param in main_program.global_block().all_parameters()
            if 'weights' in param.name
        ]
        # NOTE(review): this clones fluid.default_main_program(), not the
        # `main_program` built above — confirm that is intentional (the
        # scrape lost the original indentation, so this statement may have
        # been inside the program_guard block).
        val_program = fluid.default_main_program().clone(for_test=True)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        scope = fluid.Scope()
        exe.run(startup_program, scope=scope)

        # NOTE(review): parameters are initialized into `scope`, but the
        # pruner is handed fluid.global_scope() — verify these agree.
        pruner = AutoPruner(
            val_program,
            fluid.global_scope(),
            place,
            params=params,
            init_ratios=[0.33] * len(params),
            pruned_flops=0.5,
            pruned_latency=None,
            server_addr=("", 0),
            init_temperature=100,
            reduce_rate=0.85,
            max_try_times=300,
            max_client_num=10,
            search_steps=100,
            max_ratios=0.9,
            min_ratios=0.,
            is_server=True,
            key="auto_pruner")

        baseratio = None
        lastratio = None
        for step in range(10):
            pruned_program, pruned_val_program = pruner.prune(
                fluid.default_main_program(), val_program)
            # A constant reward still lets the SA controller perturb the
            # tokens, so the ratios are expected to change across steps.
            pruner.reward(0.2)
            if step == 0:
                baseratio = pruner._current_ratios
            elif step == 9:
                lastratio = pruner._current_ratios

        # The searched ratios must differ from the initial ones in at
        # least one position.
        self.assertTrue(
            any(base != last for base, last in zip(baseratio, lastratio)))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
......@@ -51,7 +51,7 @@ class TestPrune(unittest.TestCase):
exe.run(startup_program, scope=scope)
criterion = 'bn_scale'
idx_selector = 'optimal_threshold'
pruner = Pruner(criterion)
pruner = Pruner(criterion, idx_selector=idx_selector)
main_program, _, _ = pruner.prune(
main_program,
scope,
......
......@@ -17,7 +17,7 @@ import unittest
import numpy
import paddle
import paddle.fluid as fluid
from paddleslim.prune import sensitivity
from paddleslim.prune import sensitivity, merge_sensitive, load_sensitivities
from layers import conv_bn_layer
......@@ -60,8 +60,30 @@ class TestSensitivity(unittest.TestCase):
print("acc_val_mean: {}".format(acc_val_mean))
return acc_val_mean
sensitivity(eval_program, place, ["conv4_weights"], eval_func,
"./sensitivities_file")
sensitivity(
eval_program,
place, ["conv4_weights"],
eval_func,
"./sensitivities_file_0",
pruned_ratios=[0.1, 0.2])
sensitivity(
eval_program,
place, ["conv4_weights"],
eval_func,
"./sensitivities_file_1",
pruned_ratios=[0.3, 0.4])
sens_0 = load_sensitivities('./sensitivities_file_0')
sens_1 = load_sensitivities('./sensitivities_file_1')
sens = merge_sensitive([sens_0, sens_1])
origin_sens = sensitivity(
eval_program,
place, ["conv4_weights"],
eval_func,
"./sensitivities_file_1",
pruned_ratios=[0.1, 0.2, 0.3, 0.4])
self.assertTrue(sens == origin_sens)
if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册