Commit bb22da58 authored by wanghaoshuang

Add sensitive pruner.

Parent 638d3323
import os
import sys
import logging
import paddle
import argparse
import functools
import math
import time
import numpy as np
import paddle.fluid as fluid
from paddleslim.prune import SensitivePruner
from paddleslim.common import get_logger
from paddleslim.analysis import flops
sys.path.append(sys.path[0] + "/../")
import models
from utility import add_arguments, print_arguments
_logger = get_logger(__name__, level=logging.INFO)

parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)

# yapf: disable
add_arg('batch_size', int, 64 * 4, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('model', str, "MobileNet", "The target model.")
add_arg('pretrained_model', str, "../pretrained_model/MobileNetV1_pretained", "The path of the pretrained model.")
add_arg('lr', float, 0.1, "The learning rate used to fine-tune pruned model.")
add_arg('lr_strategy', str, "piecewise_decay", "The learning rate decay strategy.")
add_arg('l2_decay', float, 3e-5, "The l2_decay parameter.")
add_arg('momentum_rate', float, 0.9, "The value of momentum_rate.")
add_arg('num_epochs', int, 120, "The number of total epochs.")
add_arg('total_images', int, 1281167, "The number of total training images.")
parser.add_argument('--step_epochs', nargs='+', type=int, default=[30, 60, 90], help="piecewise decay step")
add_arg('config_file', str, None, "The config file for compression with yaml format.")
add_arg('data', str, "mnist", "Which data to use. 'mnist' or 'imagenet'")
add_arg('log_period', int, 10, "Log period in batches.")
add_arg('test_period', int, 10, "Test period in epoches.")
# yapf: enable

model_list = [m for m in dir(models) if "__" not in m]


def piecewise_decay(args):
    step = int(math.ceil(float(args.total_images) / args.batch_size))
    bd = [step * e for e in args.step_epochs]
    lr = [args.lr * (0.1**i) for i in range(len(bd) + 1)]
    learning_rate = fluid.layers.piecewise_decay(boundaries=bd, values=lr)
    optimizer = fluid.optimizer.Momentum(
        learning_rate=learning_rate,
        momentum=args.momentum_rate,
        regularization=fluid.regularizer.L2Decay(args.l2_decay))
    return optimizer
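
# For example, with the default arguments (total_images=1281167,
# batch_size=256, step_epochs=[30, 60, 90], lr=0.1):
# step = ceil(1281167 / 256) = 5005, so the decay boundaries fall at
# iterations [150150, 300300, 450450] and the learning-rate values are
# [0.1, 0.01, 0.001, 0.0001].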


def cosine_decay(args):
    step = int(math.ceil(float(args.total_images) / args.batch_size))
    learning_rate = fluid.layers.cosine_decay(
        learning_rate=args.lr, step_each_epoch=step, epochs=args.num_epochs)
    optimizer = fluid.optimizer.Momentum(
        learning_rate=learning_rate,
        momentum=args.momentum_rate,
        regularization=fluid.regularizer.L2Decay(args.l2_decay))
    return optimizer


def create_optimizer(args):
    if args.lr_strategy == "piecewise_decay":
        return piecewise_decay(args)
    elif args.lr_strategy == "cosine_decay":
        return cosine_decay(args)
    # Note: any other value of --lr_strategy silently returns None.


def compress(args):
    train_reader = None
    val_reader = None
    if args.data == "mnist":
        import paddle.dataset.mnist as reader
        train_reader = reader.train()
        val_reader = reader.test()
        class_dim = 10
        image_shape = "1,28,28"
    elif args.data == "imagenet":
        import imagenet_reader as reader
        train_reader = reader.train()
        val_reader = reader.val()
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in model list: {}".format(
        args.model, model_list)
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    cost = fluid.layers.cross_entropy(input=out, label=label)
    avg_cost = fluid.layers.mean(x=cost)
    acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
    acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
    # Clone the evaluation program before the optimizer ops are appended.
    val_program = fluid.default_main_program().clone(for_test=True)
    opt = create_optimizer(args)
    opt.minimize(avg_cost)
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if args.pretrained_model:

        def if_exist(var):
            return os.path.exists(
                os.path.join(args.pretrained_model, var.name))

        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)

    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
    train_reader = paddle.batch(
        train_reader, batch_size=args.batch_size, drop_last=True)

    train_feeder = fluid.DataFeeder([image, label], place)
    val_feeder = fluid.DataFeeder([image, label], place, program=val_program)

    def test(epoch, program):
        batch_id = 0
        acc_top1_ns = []
        acc_top5_ns = []
        for data in val_reader():
            start_time = time.time()
            acc_top1_n, acc_top5_n = exe.run(
                program,
                feed=val_feeder.feed(data),
                fetch_list=[acc_top1.name, acc_top5.name])
            end_time = time.time()
            if batch_id % args.log_period == 0:
                _logger.info(
                    "Eval epoch[{}] batch[{}] - acc_top1: {:.3f}; acc_top5: {:.3f}; time: {:.3f}".
                    format(epoch, batch_id,
                           np.mean(acc_top1_n),
                           np.mean(acc_top5_n), end_time - start_time))
            acc_top1_ns.append(np.mean(acc_top1_n))
            acc_top5_ns.append(np.mean(acc_top5_n))
            batch_id += 1
        _logger.info(
            "Final eval epoch[{}] - acc_top1: {:.3f}; acc_top5: {:.3f}".format(
                epoch,
                np.mean(np.array(acc_top1_ns)),
                np.mean(np.array(acc_top5_ns))))
        return np.mean(np.array(acc_top1_ns))

    def train(epoch, program):
        build_strategy = fluid.BuildStrategy()
        exec_strategy = fluid.ExecutionStrategy()
        train_program = fluid.compiler.CompiledProgram(
            program).with_data_parallel(
                loss_name=avg_cost.name,
                build_strategy=build_strategy,
                exec_strategy=exec_strategy)
        batch_id = 0
        for data in train_reader():
            start_time = time.time()
            loss_n, acc_top1_n, acc_top5_n = exe.run(
                train_program,
                feed=train_feeder.feed(data),
                fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name])
            end_time = time.time()
            loss_n = np.mean(loss_n)
            acc_top1_n = np.mean(acc_top1_n)
            acc_top5_n = np.mean(acc_top5_n)
            if batch_id % args.log_period == 0:
                _logger.info(
                    "epoch[{}]-batch[{}] - loss: {:.3f}; acc_top1: {:.3f}; acc_top5: {:.3f}; time: {:.3f}".
                    format(epoch, batch_id, loss_n, acc_top1_n, acc_top5_n,
                           end_time - start_time))
            batch_id += 1

    # Select the convolution weights to prune; in the MobileNet definition
    # the separable-conv weights carry the "_sep_weights" suffix.
    params = []
    for param in fluid.default_main_program().global_block().all_parameters():
        if "_sep_weights" in param.name:
            params.append(param.name)

    def eval_func(program):
        return test(0, program)

    pruner = SensitivePruner(place, eval_func)

    # Warm up on MNIST so the sensitivity evaluation has a trained baseline.
    if args.data == "mnist":
        train(0, fluid.default_main_program())

    pruned_program = fluid.default_main_program()
    pruned_val_program = val_program
    for iter in range(6):
        pruned_program, pruned_val_program = pruner.prune(
            pruned_program, pruned_val_program, params, 0.1)
        train(iter, pruned_program)
        test(iter, pruned_val_program)
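
    # Each prune() call removes roughly 10% of the *current* FLOPS, so six
    # iterations prune about 1 - 0.9**6 ≈ 47% of the original FLOPS. This is
    # approximate: the internal binary search only matches each step's target
    # within a 0.015 tolerance.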
print("before flops: {}".format(flops(fluid.default_main_program())))
print("after flops: {}".format(flops(pruned_val_program)))
def main():
args = parser.parse_args()
print_arguments(args)
compress(args)
if __name__ == '__main__':
main()

@@ -17,6 +17,7 @@ import os
 import logging
 import pickle
 import numpy as np
+import paddle.fluid as fluid
 from ..core import GraphWrapper
 from ..common import get_logger
 from ..prune import Pruner
@@ -80,7 +81,7 @@ def sensitivity(program,
                 param_t = scope.find_var(param_name).get_tensor()
                 param_t.set(param_backup[param_name], place)
             ratio += step_size
-            return sensitivities
+    return sensitivities
def _load_sensitivities(sensitivities_file):
......
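For orientation, sensitivity() (only partially visible in the hunks above) evaluates each parameter at a series of pruning ratios and records the relative drop of the eval_func score, caching the results to sensitivities_file so that interrupted runs can resume. A sketch of the per-(parameter, ratio) bookkeeping; the exact dict field names are an assumption, not shown in these hunks:

    # assumed structure of the sensitivities dict built by sensitivity()
    loss = (baseline_score - pruned_score) / baseline_score    # relative metric drop
    sensitivities[param_name]['pruned_percent'].append(ratio)  # assumed field name
    sensitivities[param_name]['loss'].append(loss)             # assumed field name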

@@ -23,7 +23,7 @@ import controller_client
 from controller_client import *
 import lock_utils
 from lock_utils import *
-import cached_reader
+import cached_reader as cached_reader_module
 from cached_reader import *
 __all__ = []
@@ -32,4 +32,4 @@ __all__ += sa_controller.__all__
 __all__ += controller_server.__all__
 __all__ += controller_client.__all__
 __all__ += lock_utils.__all__
-__all__ += cached_reader.__all__
+__all__ += cached_reader_module.__all__
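This rename matters because of name shadowing, which is likely the point of the change: "from cached_reader import *" re-binds the name cached_reader from the module to the callable it exports, so the later "__all__ += cached_reader.__all__" line would look up __all__ on the function and fail. A minimal sketch of the failure mode (assuming, as the star-import suggests, that the module exports a function named cached_reader):

    import cached_reader             # "cached_reader" is bound to the module
    from cached_reader import *      # re-binds "cached_reader" to the exported function
    # cached_reader.__all__ would now raise AttributeError (a function has no
    # __all__); the aliased import keeps a stable reference to the module itself:
    import cached_reader as cached_reader_module
    __all__ += cached_reader_module.__all__  # works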

@@ -12,8 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import copy
+from scipy.optimize import leastsq
+import numpy as np
+import paddle.fluid as fluid
 from ..common import get_logger
+from ..analysis import sensitivity
+from ..analysis import flops
 from .pruner import Pruner
+__all__ = ["SensitivePruner"]
@@ -22,12 +29,31 @@ _logger = get_logger(__name__, level=logging.INFO)
class SensitivePruner(object):
    def __init__(self, place, eval_func, scope=None):
        """
        A pruner that iteratively prunes parameters according to their
        sensitivities in each step.
        Args:
            place(fluid.CUDAPlace|fluid.CPUPlace): The device place where the programs execute.
            eval_func(function): A callback used to evaluate a pruned program. It takes the pruned program as its argument and returns a score.
            scope(fluid.Scope): The scope used to execute the programs.
        """
        self._eval_func = eval_func
        self._iter = 0
        self._place = place
        self._scope = fluid.global_scope() if scope is None else scope
        self._pruner = Pruner()

    def prune(self, train_program, eval_program, params, pruned_flops):
        """
        Prune the parameters of the training and evaluation networks by their
        sensitivities in the current step.
        Args:
            train_program(fluid.Program): The training program to be pruned.
            eval_program(fluid.Program): The evaluation program to be pruned. It is also used to compute the sensitivities of the parameters.
            params(list<str>): The names of the parameters to be pruned.
            pruned_flops(float): The ratio of FLOPS to be pruned in the current step.
        Returns:
            tuple: The pruned training program and the pruned evaluation program.
        """
        _logger.info("Pruning: {}".format(params))
        sensitivities_file = "sensitivities_iter{}.data".format(self._iter)
        with fluid.scope_guard(self._scope):
            sensitivities = sensitivity(
@@ -37,8 +63,9 @@ class SensitivePruner(object):
             self._eval_func,
             sensitivities_file=sensitivities_file,
             step_size=0.1)
-        ratios = self._get_ratios_by_sensitive(sensitivities, pruned_flops,
-                                               eval_program)
+        print(sensitivities)
+        _, ratios = self._get_ratios_by_sensitive(sensitivities, pruned_flops,
+                                                  eval_program)
         pruned_program = self._pruner.prune(
             train_program,
@@ -91,13 +118,13 @@ class SensitivePruner(object):
             max_loss = np.max([max_loss, loss])
         # step 2: Find a group of ratios by binary searching.
-        flops = flops(eval_program)
+        base_flops = flops(eval_program)
         ratios = []
-        pruner = Pruner()
-        while min_loss < max_loss:
+        max_times = 20
+        while min_loss < max_loss and max_times > 0:
             loss = (max_loss + min_loss) / 2
             _logger.info(
-                '-----------Try pruned ratios while acc loss={:.4f}-----------'.
+                '-----------Try pruned ratios while acc loss={}-----------'.
                 format(loss))
             ratios = []
             # step 2.1: Get ratios according to current loss
@@ -114,22 +141,22 @@ class SensitivePruner(object):
                 [round(ratio, 3) for ratio in ratios]))
             # step 2.2: Pruning by current ratios
             param_shape_backup = {}
-            pruned_program = pruner.prune(
+            pruned_program = self._pruner.prune(
                 eval_program,
                 None,  # scope
                 sensitivities.keys(),
                 ratios,
                 None,  # place
                 only_graph=True)
-            pruned_flops = 1 - (flops(pruned_program) / flops)
-            _logger.info('Pruned flops: {:.4f}'.format(pruned_flops))
+            pruned_ratio = 1 - (float(flops(pruned_program)) / base_flops)
+            _logger.info('Pruned flops: {:.4f}'.format(pruned_ratio))
             # step 2.3: Check whether current ratios is enough
-            if abs(pruned_flops - target_ratio) < 0.015:
+            if abs(pruned_ratio - pruned_flops) < 0.015:
                 break
-            if pruned_flops > target_ratio:
+            if pruned_ratio > pruned_flops:
                 max_loss = loss
             else:
                 min_loss = loss
+            max_times -= 1
         return sensitivities.keys(), ratios
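
Taken together with the example script at the top of this commit, typical usage of SensitivePruner reduces to the loop below (a minimal sketch: place, eval_func, params, val_program, train and test are assumed to be set up as in the script, and 0.1 is the per-step FLOPS ratio):

    pruner = SensitivePruner(place, eval_func)

    program, val_prog = fluid.default_main_program(), val_program
    for step in range(6):
        # prune() measures per-parameter sensitivities (cached on disk as
        # sensitivities_iter{N}.data), binary-searches a set of per-parameter
        # ratios that removes about 10% of the current FLOPS, and returns the
        # pruned training and evaluation programs.
        program, val_prog = pruner.prune(program, val_prog, params, 0.1)
        train(step, program)  # fine-tune to recover accuracy
        test(step, val_prog)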