Commit 339a1ef6 authored by: W wanghaoshuang

Fix format.

Parent 638128d9
@@ -34,12 +34,12 @@ add_arg('config_file', str, None, "The config file for comp
model_list = [m for m in dir(models) if "__" not in m]
ratiolist = [
# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
]
# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
]
def save_model(args, exe, train_prog, eval_prog,info):
def save_model(args, exe, train_prog, eval_prog, info):
model_path = os.path.join(args.model_save_dir, args.model, str(info))
if not os.path.isdir(model_path):
os.makedirs(model_path)
@@ -58,29 +58,31 @@ def piecewise_decay(args):
regularization=fluid.regularizer.L2Decay(args.l2_decay))
return optimizer
def cosine_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
learning_rate = fluid.layers.cosine_decay(
learning_rate=args.lr,
step_each_epoch=step,
epochs=args.num_epochs)
learning_rate=args.lr, step_each_epoch=step, epochs=args.num_epochs)
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=args.momentum_rate,
regularization=fluid.regularizer.L2Decay(args.l2_decay))
return optimizer
def create_optimizer(args):
if args.lr_strategy == "piecewise_decay":
return piecewise_decay(args)
elif args.lr_strategy == "cosine_decay":
return cosine_decay(args)
def compress(args):
class_dim=1000
image_shape="3,224,224"
class_dim = 1000
image_shape = "3,224,224"
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
@@ -98,10 +100,13 @@ def compress(args):
exe.run(fluid.default_startup_program())
if args.pretrained_model:
def if_exist(var):
exist = os.path.exists(os.path.join(args.pretrained_model, var.name))
print("exist",exist)
exist = os.path.exists(
os.path.join(args.pretrained_model, var.name))
print("exist", exist)
return exist
#fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
@@ -109,7 +114,8 @@ def compress(args):
reader.train(), batch_size=args.batch_size, drop_last=True)
train_feeder = feeder = fluid.DataFeeder([image, label], place)
val_feeder = feeder = fluid.DataFeeder([image, label], place, program=val_program)
val_feeder = feeder = fluid.DataFeeder(
[image, label], place, program=val_program)
def test(epoch, program):
batch_id = 0
@@ -117,17 +123,23 @@ def compress(args):
acc_top5_ns = []
for data in val_reader():
start_time = time.time()
acc_top1_n, acc_top5_n = exe.run(program,
acc_top1_n, acc_top5_n = exe.run(
program,
feed=train_feeder.feed(data),
fetch_list=[acc_top1.name, acc_top5.name])
end_time = time.time()
print("Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".format(epoch, batch_id, np.mean(acc_top1_n), np.mean(acc_top5_n), end_time-start_time))
print(
"Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
format(epoch, batch_id,
np.mean(acc_top1_n),
np.mean(acc_top5_n), end_time - start_time))
acc_top1_ns.append(np.mean(acc_top1_n))
acc_top5_ns.append(np.mean(acc_top5_n))
batch_id += 1
print("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(epoch, np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
print("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(
epoch,
np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
def train(epoch, program):
@@ -142,15 +154,22 @@ def compress(args):
batch_id = 0
for data in train_reader():
start_time = time.time()
loss_n, acc_top1_n, acc_top5_n,lr_n = exe.run(train_program,
loss_n, acc_top1_n, acc_top5_n, lr_n = exe.run(
train_program,
feed=train_feeder.feed(data),
fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name,"learning_rate"])
fetch_list=[
avg_cost.name, acc_top1.name, acc_top5.name,
"learning_rate"
])
end_time = time.time()
loss_n = np.mean(loss_n)
acc_top1_n = np.mean(acc_top1_n)
acc_top5_n = np.mean(acc_top5_n)
lr_n = np.mean(lr_n)
print("epoch[{}]-batch[{}] - loss: {}; acc_top1: {}; acc_top5: {};lrn: {}; time: {}".format(epoch, batch_id, loss_n, acc_top1_n, acc_top5_n, lr_n,end_time-start_time))
print(
"epoch[{}]-batch[{}] - loss: {}; acc_top1: {}; acc_top5: {};lrn: {}; time: {}".
format(epoch, batch_id, loss_n, acc_top1_n, acc_top5_n, lr_n,
end_time - start_time))
batch_id += 1
params = []
@@ -158,39 +177,45 @@ def compress(args):
#if "_weights" in param.name and "conv1_weights" not in param.name:
if "_sep_weights" in param.name:
params.append(param.name)
print("fops before pruning: {}".format(flops(fluid.default_main_program())))
print("fops before pruning: {}".format(
flops(fluid.default_main_program())))
pruned_program_iter = fluid.default_main_program()
pruned_val_program_iter = val_program
for ratios in ratiolist:
pruner = Pruner()
pruned_val_program_iter = pruner.prune(pruned_val_program_iter,
pruned_val_program_iter = pruner.prune(
pruned_val_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place,
only_graph=True)
pruned_program_iter = pruner.prune(pruned_program_iter,
pruned_program_iter = pruner.prune(
pruned_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place)
print("fops after pruning: {}".format(flops(pruned_program_iter)))
""" do not inherit learning rate """
if(os.path.exists(args.pretrained_model + "/learning_rate")):
os.remove( args.pretrained_model + "/learning_rate")
if(os.path.exists(args.pretrained_model + "/@LR_DECAY_COUNTER@")):
os.remove( args.pretrained_model + "/@LR_DECAY_COUNTER@")
fluid.io.load_vars(exe, args.pretrained_model , main_program = pruned_program_iter, predicate=if_exist)
if (os.path.exists(args.pretrained_model + "/learning_rate")):
os.remove(args.pretrained_model + "/learning_rate")
if (os.path.exists(args.pretrained_model + "/@LR_DECAY_COUNTER@")):
os.remove(args.pretrained_model + "/@LR_DECAY_COUNTER@")
fluid.io.load_vars(
exe,
args.pretrained_model,
main_program=pruned_program_iter,
predicate=if_exist)
pruned_program = pruned_program_iter
pruned_val_program = pruned_val_program_iter
for i in range(args.num_epochs):
train(i, pruned_program)
test(i, pruned_val_program)
save_model(args,exe,pruned_program,pruned_val_program,i)
save_model(args, exe, pruned_program, pruned_val_program, i)
def main():
args = parser.parse_args()
......
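Between the two training scripts touched by this commit, it may help to see the iterative pruning loop that the hunks above reformat gathered in one place. The sketch below is not part of the commit: it only repackages the paddleslim.prune.Pruner calls exactly as they appear in the diff, and the train_program, val_program, params, ratiolist and place parameters stand in for the script's own variables of the same names.

from paddleslim.prune import Pruner
from paddleslim.analysis import flops
import paddle.fluid as fluid


def iterative_prune(train_program, val_program, params, ratiolist, place):
    """Apply each ratio list in turn, mirroring the demo script above."""
    for ratios in ratiolist:
        pruner = Pruner()
        # only_graph=True rewrites the eval graph's shapes without touching weights.
        val_program = pruner.prune(
            val_program,
            fluid.global_scope(),
            params=params,
            ratios=ratios,
            place=place,
            only_graph=True)
        # Prune the train program together with the parameters in the global scope.
        train_program = pruner.prune(
            train_program,
            fluid.global_scope(),
            params=params,
            ratios=ratios,
            place=place)
    print("FLOPs after pruning: {}".format(flops(train_program)))
    return train_program, val_program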
@@ -41,9 +41,10 @@ add_arg('test_period', int, 10, "Test period in epoches.")
model_list = [m for m in dir(models) if "__" not in m]
ratiolist = [
# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
]
# [0.06, 0.0, 0.09, 0.03, 0.09, 0.02, 0.05, 0.03, 0.0, 0.07, 0.07, 0.05, 0.08],
# [0.08, 0.02, 0.03, 0.13, 0.1, 0.06, 0.03, 0.04, 0.14, 0.02, 0.03, 0.02, 0.01],
]
def piecewise_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
@@ -194,21 +195,26 @@ def compress(args):
for ratios in ratiolist:
pruner = Pruner()
pruned_val_program_iter = pruner.prune(pruned_val_program_iter,
pruned_val_program_iter = pruner.prune(
pruned_val_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place,
only_graph=True)
pruned_program_iter = pruner.prune(pruned_program_iter,
pruned_program_iter = pruner.prune(
pruned_program_iter,
fluid.global_scope(),
params=params,
ratios=ratios,
place=place)
print("fops after pruning: {}".format(flops(pruned_program_iter)))
fluid.io.load_vars(exe, args.pretrained_model , main_program = pruned_program_iter, predicate=if_exist)
fluid.io.load_vars(
exe,
args.pretrained_model,
main_program=pruned_program_iter,
predicate=if_exist)
pruner = AutoPruner(
pruned_val_program_iter,
@@ -238,8 +244,6 @@ def compress(args):
pruner.reward(score)
def main():
args = parser.parse_args()
print_arguments(args)
......
@@ -99,8 +99,8 @@ def exponential_decay_with_warmup(learning_rate,
(step_each_epoch * warmup_epoch))
fluid.layers.assign(input=decayed_lr, output=lr)
with switch.default():
div_res = (global_step - warmup_epoch * step_each_epoch
) / decay_epochs
div_res = (
global_step - warmup_epoch * step_each_epoch) / decay_epochs
div_res = ops.floor(div_res)
decayed_lr = learning_rate * (decay_rate**div_res)
fluid.layers.assign(input=decayed_lr, output=lr)
......
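For reference, the post-warmup branch reformatted just above reduces to the expression below. This is only a restatement of the code shown in the hunk, with plain Python standing in for the fluid ops.

import math


def post_warmup_lr(learning_rate, decay_rate, global_step, warmup_epoch,
                   step_each_epoch, decay_epochs):
    # Steps elapsed since warmup ended, divided by decay_epochs and floored,
    # give the exponent applied to decay_rate.
    div_res = math.floor(
        (global_step - warmup_epoch * step_each_epoch) / decay_epochs)
    return learning_rate * decay_rate ** div_res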
@@ -36,7 +36,8 @@ logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
DOWNLOAD_RETRY_LIMIT=3
DOWNLOAD_RETRY_LIMIT = 3
def print_arguments(args):
"""Print argparse's arguments.
@@ -211,6 +212,7 @@ def _download(url, path, md5sum=None):
return fullname
def _md5check(fullname, md5sum=None):
if md5sum is None:
return True
@@ -228,6 +230,7 @@ def _md5check(fullname, md5sum=None):
return False
return True
def _decompress(fname):
"""
Decompress for zip and tar file
@@ -261,6 +264,7 @@ def _decompress(fname):
shutil.rmtree(fpath_tmp)
os.remove(fname)
def _move_and_merge_tree(src, dst):
"""
Move src directory to dst, if dst is already exists,
......
@@ -19,6 +19,7 @@ from paddleslim.nas import SANAS
from paddleslim.analysis import flops
import numpy as np
def compute_op_num(program):
params = {}
ch_list = []
@@ -29,14 +30,16 @@ def compute_op_num(program):
ch_list.append(int(param.shape[0]))
return params, ch_list
class TestSANAS(unittest.TestCase):
def setUp(self):
self.init_test_case()
port = np.random.randint(8337, 8773)
self.sanas = SANAS(configs=self.configs, server_addr=("", port), save_checkpoint=None)
self.sanas = SANAS(
configs=self.configs, server_addr=("", port), save_checkpoint=None)
def init_test_case(self):
self.configs=[('MobileNetV2BlockSpace', {'block_mask':[0]})]
self.configs = [('MobileNetV2BlockSpace', {'block_mask': [0]})]
self.filter_num = np.array([
3, 4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 144, 160, 192, 224,
256, 320, 384, 512
@@ -53,7 +56,10 @@ class TestSANAS(unittest.TestCase):
conv_list, ch_pro = compute_op_num(program)
### assert conv number
self.assertTrue((repeat_num * 3) == len(conv_list), "the number of conv is NOT match, the number compute from token: {}, actual conv number: {}".format(repeat_num * 3, len(conv_list)))
self.assertTrue((repeat_num * 3) == len(
conv_list
), "the number of conv is NOT match, the number compute from token: {}, actual conv number: {}".
format(repeat_num * 3, len(conv_list)))
### assert number of channels
ch_token = []
@@ -64,7 +70,10 @@ class TestSANAS(unittest.TestCase):
ch_token.append(filter_num)
init_ch_num = filter_num
self.assertTrue(str(ch_token) == str(ch_pro), "channel num is WRONG, channel num from token is {}, channel num come fom program is {}".format(str(ch_token), str(ch_pro)))
self.assertTrue(
str(ch_token) == str(ch_pro),
"channel num is WRONG, channel num from token is {}, channel num come fom program is {}".
format(str(ch_token), str(ch_pro)))
def test_all_function(self):
### unittest for next_archs
@@ -73,7 +82,8 @@ class TestSANAS(unittest.TestCase):
token2arch_program = fluid.Program()
with fluid.program_guard(next_program, startup_program):
inputs = fluid.data(name='input', shape=[None, 3, 32, 32], dtype='float32')
inputs = fluid.data(
name='input', shape=[None, 3, 32, 32], dtype='float32')
archs = self.sanas.next_archs()
for arch in archs:
output = arch(inputs)
@@ -85,8 +95,10 @@ class TestSANAS(unittest.TestCase):
### uniitest for tokens2arch
with fluid.program_guard(token2arch_program, startup_program):
inputs = fluid.data(name='input', shape=[None, 3, 32, 32], dtype='float32')
arch = self.sanas.tokens2arch(self.sanas.current_info()['current_tokens'])
inputs = fluid.data(
name='input', shape=[None, 3, 32, 32], dtype='float32')
arch = self.sanas.tokens2arch(self.sanas.current_info()[
'current_tokens'])
for arch in archs:
output = arch(inputs)
inputs = output
@@ -94,7 +106,11 @@ class TestSANAS(unittest.TestCase):
### unittest for current_info
current_info = self.sanas.current_info()
self.assertTrue(isinstance(current_info, dict), "the type of current info must be dict, but now is {}".format(type(current_info)))
self.assertTrue(
isinstance(current_info, dict),
"the type of current info must be dict, but now is {}".format(
type(current_info)))
if __name__ == '__main__':
unittest.main()
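To close, a condensed sketch of the SANAS flow exercised by the reformatted unittest above. Every call, literal and shape is lifted from the test itself; treat it as an illustration of that flow rather than authoritative API documentation (with server_addr=("", port) the test also runs the search server locally).

import numpy as np
import paddle.fluid as fluid
from paddleslim.nas import SANAS

port = np.random.randint(8337, 8773)
sanas = SANAS(
    configs=[('MobileNetV2BlockSpace', {'block_mask': [0]})],
    server_addr=("", port),
    save_checkpoint=None)

program, startup = fluid.Program(), fluid.Program()
with fluid.program_guard(program, startup):
    inputs = fluid.data(name='input', shape=[None, 3, 32, 32], dtype='float32')
    for arch in sanas.next_archs():   # sample the next candidate architecture
        inputs = arch(inputs)         # each arch is a callable that adds layers

tokens = sanas.current_info()['current_tokens']  # tokens describing the sample
layers = sanas.tokens2arch(tokens)               # rebuild the layers from tokens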