diff --git a/demo/distillation/distill.py b/demo/distillation/distill.py
index 23a1b18fd3b014f1acf847a2e28bb68231c63813..a180c3c9449531e4372503d01ef022ec6fccdd5b 100644
--- a/demo/distillation/distill.py
+++ b/demo/distillation/distill.py
@@ -121,6 +121,7 @@ def compress(args):
         feed_list=[image, label],
         drop_last=True,
         batch_size=args.batch_size,
+        return_list=False,
         shuffle=True,
         use_shared_memory=False,
         num_workers=1)
@@ -129,6 +130,7 @@ def compress(args):
         places=place,
         feed_list=[image, label],
         drop_last=False,
+        return_list=False,
         use_shared_memory=False,
         batch_size=args.batch_size,
         shuffle=False)
@@ -217,7 +219,7 @@ def compress(args):
                 format(epoch_id, step_id, val_loss[0], val_acc1[0],
                        val_acc5[0]))
         if args.save_inference:
-            paddle.static.save_inference_model(
+            paddle.fluid.io.save_inference_model(
                 os.path.join("./saved_models", str(epoch_id)), ["image"],
                 [out], exe, student_program)
         _logger.info("epoch {} top1 {:.6f}, top5 {:.6f}".format(
diff --git a/demo/prune/eval.py b/demo/prune/eval.py
index 5448fe90083d0fd221fea8e7f8ec51fd3b1233d3..82f294f9915de5de6d5d1a3f831a8a1b737917c8 100644
--- a/demo/prune/eval.py
+++ b/demo/prune/eval.py
@@ -65,6 +65,7 @@ def eval(args):
         places=place,
         feed_list=[image, label],
         drop_last=False,
+        return_list=False,
         batch_size=args.batch_size,
         shuffle=False)

diff --git a/demo/prune/train.py b/demo/prune/train.py
index 581cbfc3d0c75746d7f60afe2fda0be7036c9f8d..138bf55c5072cdb21b34d3d78947b69593b0f89a 100644
--- a/demo/prune/train.py
+++ b/demo/prune/train.py
@@ -150,6 +150,7 @@ def compress(args):
         drop_last=True,
         batch_size=args.batch_size,
         shuffle=True,
+        return_list=False,
         use_shared_memory=False,
         num_workers=16)
     valid_loader = paddle.io.DataLoader(
@@ -157,6 +158,7 @@ def compress(args):
         places=place,
         feed_list=[image, label],
         drop_last=False,
+        return_list=False,
         use_shared_memory=False,
         batch_size=args.batch_size,
         shuffle=False)
diff --git a/demo/quant/pact_quant_aware/train.py b/demo/quant/pact_quant_aware/train.py
index 414165575924840267042486f0a9f61e42eb1709..67babd4f1b0acd7c6da416731752b8b106658b2b 100644
--- a/demo/quant/pact_quant_aware/train.py
+++ b/demo/quant/pact_quant_aware/train.py
@@ -426,7 +426,7 @@ def compress(args):
         if not os.path.isdir(model_path):
             os.makedirs(model_path)

-        paddle.static.save_inference_model(
+        paddle.fluid.io.save_inference_model(
             dirname=float_path,
             feeded_var_names=[image.name],
             target_vars=[out],
diff --git a/demo/quant/quant_aware/train.py b/demo/quant/quant_aware/train.py
index 85010b14d782f095d50032bb29e8cb382dac8209..0aa5937754dd076794d7c8bb432144d4806b1848 100644
--- a/demo/quant/quant_aware/train.py
+++ b/demo/quant/quant_aware/train.py
@@ -168,6 +168,7 @@ def compress(args):
         feed_list=[image, label],
         drop_last=True,
         batch_size=args.batch_size,
+        return_list=False,
         use_shared_memory=False,
         shuffle=True,
         num_workers=1)
@@ -176,6 +177,7 @@ def compress(args):
         places=place,
         feed_list=[image, label],
         drop_last=False,
+        return_list=False,
         batch_size=args.batch_size,
         use_shared_memory=False,
         shuffle=False)
@@ -277,7 +279,7 @@ def compress(args):
         if not os.path.isdir(model_path):
             os.makedirs(model_path)

-        paddle.static.save_inference_model(
+        paddle.fluid.io.save_inference_model(
             dirname=float_path,
             feeded_var_names=[image.name],
             target_vars=[out],
diff --git a/demo/quant/quant_post/eval.py b/demo/quant/quant_post/eval.py
index 1d15eaa9f07cf9eaea3bd9ee5ce77f0c6ca66c57..a0d8c76390b5c03ff5c8adafc72678dbb176ddc8 100755
--- a/demo/quant/quant_post/eval.py
+++ b/demo/quant/quant_post/eval.py
@@ -40,7 +40,7 @@ def eval(args):

     place = paddle.CUDAPlace(0) if args.use_gpu else paddle.CPUPlace()
     exe = paddle.static.Executor(place)
-    val_program, feed_target_names, fetch_targets = paddle.static.load_inference_model(
+    val_program, feed_target_names, fetch_targets = paddle.fluid.io.load_inference_model(
         args.model_path,
         exe,
         model_filename=args.model_name,
diff --git a/demo/quant/quant_post/export_model.py b/demo/quant/quant_post/export_model.py
index a83ddc644956eed04818f38c430a02bd89ca24a3..e8b16db54cb3b93b8ef2b9b2f67524e2b4407847 100755
--- a/demo/quant/quant_post/export_model.py
+++ b/demo/quant/quant_post/export_model.py
@@ -62,7 +62,7 @@ def export_model(args):
     else:
         assert False, "args.pretrained_model must set"

-    paddle.static.save_inference_model(
+    paddle.fluid.io.save_inference_model(
         './inference_model/' + args.model,
         feeded_var_names=[image.name],
         target_vars=[out],
diff --git a/tests/test_analysis_helper.py b/tests/test_analysis_helper.py
index 24d926f5271d76278c674640d7152ecc3821737c..ba5397053411bb895678979efc320b47d483b8ce 100644
--- a/tests/test_analysis_helper.py
+++ b/tests/test_analysis_helper.py
@@ -56,6 +56,7 @@ class TestAnalysisHelper(StaticCase):
             places=places,
             feed_list=[image, label],
             drop_last=True,
+            return_list=False,
             batch_size=64)

         exe.run(paddle.static.default_startup_program())
diff --git a/tests/test_quant_aware.py b/tests/test_quant_aware.py
index 84952f228c2fdd72c83dc0ff3880eb5a5792b4d4..a587c7692d2682e9d8bd34d8d69988650f0d79bb 100644
--- a/tests/test_quant_aware.py
+++ b/tests/test_quant_aware.py
@@ -124,9 +124,14 @@ class TestQuantAwareCase2(StaticCase):
             places=place,
             feed_list=[image, label],
             drop_last=True,
+            return_list=False,
             batch_size=64)
         valid_loader = paddle.io.DataLoader(
-            test_dataset, places=place, feed_list=[image, label], batch_size=64)
+            test_dataset,
+            places=place,
+            feed_list=[image, label],
+            batch_size=64,
+            return_list=False)

         def train(program):
             iter = 0
diff --git a/tests/test_quant_aware_user_defined.py b/tests/test_quant_aware_user_defined.py
index e741a1a500083e20251b888214bb26c5f78fd786..de8fd627fb4eea10effcb39fef545a3bb0d6822d 100644
--- a/tests/test_quant_aware_user_defined.py
+++ b/tests/test_quant_aware_user_defined.py
@@ -96,9 +96,14 @@ class TestQuantAwareCase1(StaticCase):
             places=place,
             feed_list=[image, label],
             drop_last=True,
+            return_list=False,
             batch_size=64)
         valid_loader = paddle.io.DataLoader(
-            test_dataset, places=place, feed_list=[image, label], batch_size=64)
+            test_dataset,
+            places=place,
+            feed_list=[image, label],
+            batch_size=64,
+            return_list=False)

         def train(program):
             iter = 0
diff --git a/tests/test_quant_post.py b/tests/test_quant_post.py
index a841baf1393f450bf952d0deccaa9d544213017d..85072666acb364fda5ff1d717e18b3bea4124fa8 100644
--- a/tests/test_quant_post.py
+++ b/tests/test_quant_post.py
@@ -60,9 +60,14 @@ class TestQuantAwareCase1(StaticCase):
             places=place,
             feed_list=[image, label],
             drop_last=True,
+            return_list=False,
             batch_size=64)
         valid_loader = paddle.io.DataLoader(
-            test_dataset, places=place, feed_list=[image, label], batch_size=64)
+            test_dataset,
+            places=place,
+            feed_list=[image, label],
+            batch_size=64,
+            return_list=False)

         def train(program):
             iter = 0
@@ -97,7 +102,7 @@ class TestQuantAwareCase1(StaticCase):
         train(main_prog)
         top1_1, top5_1 = test(val_prog)

-        paddle.static.save_inference_model(
+        paddle.fluid.io.save_inference_model(
             dirname='./test_quant_post',
             feeded_var_names=[image.name, label.name],
             target_vars=[avg_cost, acc_top1, acc_top5],
@@ -114,7 +119,7 @@ class TestQuantAwareCase1(StaticCase):
             model_filename='model',
             params_filename='params',
             batch_nums=10)
-        quant_post_prog, feed_target_names, fetch_targets = paddle.static.load_inference_model(
+        quant_post_prog, feed_target_names, fetch_targets = paddle.fluid.io.load_inference_model(
             dirname='./test_quant_post_inference',
             executor=exe,
             model_filename='__model__',
diff --git a/tests/test_quant_post_only_weight.py b/tests/test_quant_post_only_weight.py
index ebc110498891280c5d1076d5b76c5131934b3caa..2860c05c2fbb6c74baeb764e5ec100023abe80cb 100644
--- a/tests/test_quant_post_only_weight.py
+++ b/tests/test_quant_post_only_weight.py
@@ -60,9 +60,14 @@ class TestQuantPostOnlyWeightCase1(StaticCase):
             places=place,
             feed_list=[image, label],
             drop_last=True,
+            return_list=False,
             batch_size=64)
         valid_loader = paddle.io.DataLoader(
-            test_dataset, places=place, feed_list=[image, label], batch_size=64)
+            test_dataset,
+            places=place,
+            feed_list=[image, label],
+            batch_size=64,
+            return_list=False)

         def train(program):
             iter = 0
@@ -97,7 +102,7 @@ class TestQuantPostOnlyWeightCase1(StaticCase):
         train(main_prog)
         top1_1, top5_1 = test(val_prog)

-        paddle.static.save_inference_model(
+        paddle.fluid.io.save_inference_model(
             dirname='./test_quant_post_dynamic',
             feeded_var_names=[image.name, label.name],
             target_vars=[avg_cost, acc_top1, acc_top5],
@@ -112,7 +117,7 @@ class TestQuantPostOnlyWeightCase1(StaticCase):
             model_filename='model',
             params_filename='params',
             generate_test_model=True)
-        quant_post_prog, feed_target_names, fetch_targets = paddle.static.load_inference_model(
+        quant_post_prog, feed_target_names, fetch_targets = paddle.fluid.io.load_inference_model(
             dirname='./test_quant_post_inference/test_model', executor=exe)
         top1_2, top5_2 = test(quant_post_prog, fetch_targets)
         print("before quantization: top1: {}, top5: {}".format(top1_1, top5_1))
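Note on the two API behaviors this patch touches (a minimal sketch, not code from this repository): in Paddle 2.x `paddle.io.DataLoader` defaults to `return_list=True`, while the static-graph scripts above pass loader batches straight to `Executor.run`, which needs the name-to-Tensor feed format produced by `feed_list` together with `return_list=False`. Likewise, the scripts call the inference-model APIs with `dirname` / `feeded_var_names` / `target_vars` / `model_filename` / `params_filename` keywords, which the legacy `paddle.fluid.io` entry points accept, whereas the 2.x `paddle.static.save_inference_model` / `load_inference_model` use a `(path_prefix, feed_vars, fetch_vars, executor)` style signature. The toy dataset and fc network below are hypothetical stand-ins, and the sketch assumes a Paddle 2.x release where `paddle.fluid` is still available.

```python
# Minimal static-graph sketch of the patched usage patterns (illustration only).
import numpy as np
import paddle

paddle.enable_static()

image = paddle.static.data(name='image', shape=[None, 1, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
out = paddle.static.nn.fc(image, size=10)


class RandomDataset(paddle.io.Dataset):
    """Tiny random dataset standing in for the demo datasets."""

    def __len__(self):
        return 16

    def __getitem__(self, idx):
        img = np.random.rand(1, 28, 28).astype('float32')
        lbl = np.random.randint(0, 10, (1, )).astype('int64')
        return img, lbl


place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())

# return_list defaults to True in Paddle 2.x; with feed_list set and
# return_list=False the loader yields each batch already in the feed
# format that Executor.run expects, which is how the demos consume it.
loader = paddle.io.DataLoader(
    RandomDataset(),
    places=place,
    feed_list=[image, label],
    return_list=False,
    batch_size=4,
    drop_last=True)

for data in loader:
    # Each batch can be handed to Executor.run as the feed argument directly.
    exe.run(paddle.static.default_main_program(),
            feed=data,
            fetch_list=[out])

# Legacy fluid API: a directory plus variable *names*, with optional
# model/params file names -- matching the keyword calls in the scripts above.
paddle.fluid.io.save_inference_model(
    dirname='./inference_model',
    feeded_var_names=[image.name],
    target_vars=[out],
    executor=exe,
    model_filename='model',
    params_filename='params')

infer_prog, feed_names, fetch_targets = paddle.fluid.io.load_inference_model(
    dirname='./inference_model',
    executor=exe,
    model_filename='model',
    params_filename='params')
```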